/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrFragmentProcessor_DEFINED
#define GrFragmentProcessor_DEFINED

#include "src/gpu/GrCoordTransform.h"
#include "src/gpu/GrProcessor.h"
#include "src/gpu/ops/GrOp.h"

class GrGLSLFragmentProcessor;
class GrPaint;
class GrPipeline;
class GrProcessorKeyBuilder;
class GrShaderCaps;
class GrSwizzle;

/** Provides custom fragment shader code. Fragment processors receive an input color (half4) and
    produce an output color. They may reference textures and uniforms. They may use
    GrCoordTransforms to receive a transformation of the local coordinates that map from local space
    to the fragment being processed.
 */
class GrFragmentProcessor : public GrProcessor {
public:
    class TextureSampler;

    /**
     * In many instances (e.g. SkShader::asFragmentProcessor() implementations) it is desirable to
     * only consider the input color's alpha. However, there is a competing desire to have reusable
     * GrFragmentProcessor subclasses that can be used in other scenarios where the entire input
     * color is considered. This function exists to filter the input color and pass it to a FP. It
     * does so by returning a parent FP that multiplies the passed in FP's output by the parent's
     * input alpha. The passed in FP will not receive an input color.
     */
    static std::unique_ptr<GrFragmentProcessor> MulChildByInputAlpha(
            std::unique_ptr<GrFragmentProcessor> child);
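
    // Illustrative usage sketch (hypothetical caller code, not part of this header;
    // 'makeSomeColorFP()' is an assumed factory returning a std::unique_ptr<GrFragmentProcessor>):
    //
    //     std::unique_ptr<GrFragmentProcessor> colorFP = makeSomeColorFP();
    //     // 'wrapped' emits colorFP's output scaled by the input alpha; colorFP itself receives
    //     // no input color.
    //     auto wrapped = GrFragmentProcessor::MulChildByInputAlpha(std::move(colorFP));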

    /**
     * Like MulChildByInputAlpha(), but reverses the sense of src and dst. In this case, return
     * the input modulated by the child's alpha. The passed in FP will not receive an input color.
     *
     * output = input * child.a
     */
    static std::unique_ptr<GrFragmentProcessor> MulInputByChildAlpha(
            std::unique_ptr<GrFragmentProcessor> child);

    /**
     * This assumes that the input color to the returned processor will be unpremul and that the
     * passed processor (which becomes the returned processor's child) produces a premul output.
     * The result of the returned processor is a premul of its input color modulated by the child
     * processor's premul output.
     */
    static std::unique_ptr<GrFragmentProcessor> MakeInputPremulAndMulByOutput(
            std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a parent fragment processor that adopts the passed fragment processor as a child.
     * The parent will ignore its input color and instead feed the passed in color as input to the
     * child.
     */
    static std::unique_ptr<GrFragmentProcessor> OverrideInput(std::unique_ptr<GrFragmentProcessor>,
                                                              const SkPMColor4f&,
                                                              bool useUniform = true);
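
    // Illustrative usage sketch (hypothetical caller code): force a child FP to see a fixed,
    // known input color rather than the upstream color. 'child' and the chosen color are
    // assumptions for the example.
    //
    //     SkPMColor4f opaqueRed = {1.f, 0.f, 0.f, 1.f};
    //     auto fp = GrFragmentProcessor::OverrideInput(std::move(child), opaqueRed,
    //                                                  /*useUniform=*/false);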

    /**
     * Returns a fragment processor that premuls the input before calling the passed in fragment
     * processor.
     */
    static std::unique_ptr<GrFragmentProcessor> PremulInput(std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a fragment processor that calls the passed in fragment processor, and then swizzles
     * the output.
     */
    static std::unique_ptr<GrFragmentProcessor> SwizzleOutput(std::unique_ptr<GrFragmentProcessor>,
                                                              const GrSwizzle&);
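
    // Illustrative usage sketch (hypothetical caller code): broadcast the child's red channel to
    // all four output channels. A GrSwizzle constructible from the string "rrrr" is assumed here.
    //
    //     auto fp = GrFragmentProcessor::SwizzleOutput(std::move(child), GrSwizzle("rrrr"));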

    /**
     * Returns a fragment processor that runs the passed in array of fragment processors in a
     * series. The original input is passed to the first, the first's output is passed to the
     * second, etc. The output of the returned processor is the output of the last processor of the
     * series.
     *
     * The array elements will be moved.
     */
    static std::unique_ptr<GrFragmentProcessor> RunInSeries(std::unique_ptr<GrFragmentProcessor>*,
                                                            int cnt);
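
    // Illustrative usage sketch (hypothetical caller code; 'fpA' and 'fpB' are assumed FPs):
    // chain two FPs so the first's output becomes the second's input. The array is moved from.
    //
    //     std::unique_ptr<GrFragmentProcessor> series[] = {std::move(fpA), std::move(fpB)};
    //     auto chained = GrFragmentProcessor::RunInSeries(series, 2);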

    /**
     * Makes a copy of this fragment processor that draws equivalently to the original.
     * If the processor has child processors they are cloned as well.
     */
    virtual std::unique_ptr<GrFragmentProcessor> clone() const = 0;

    GrGLSLFragmentProcessor* createGLSLInstance() const;

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const {
        this->onGetGLSLProcessorKey(caps, b);
        for (int i = 0; i < fChildProcessors.count(); ++i) {
            fChildProcessors[i]->getGLSLProcessorKey(caps, b);
        }
    }

    int numTextureSamplers() const { return fTextureSamplerCnt; }
    const TextureSampler& textureSampler(int i) const;

    int numCoordTransforms() const { return fCoordTransforms.count(); }

    /** Returns the coordinate transformation at index. index must be valid according to
        numCoordTransforms(). */
    const GrCoordTransform& coordTransform(int index) const { return *fCoordTransforms[index]; }

    const SkTArray<GrCoordTransform*, true>& coordTransforms() const {
        return fCoordTransforms;
    }

    int numChildProcessors() const { return fChildProcessors.count(); }

    const GrFragmentProcessor& childProcessor(int index) const { return *fChildProcessors[index]; }

    SkDEBUGCODE(bool isInstantiated() const;)

    /** Do any of the coord transforms for this processor require local coords? */
    bool usesLocalCoords() const { return SkToBool(fFlags & kUsesLocalCoords_Flag); }

    bool computeLocalCoordsInVertexShader() const {
        return SkToBool(fFlags & kComputeLocalCoordsInVertexShader_Flag);
    }

    void setComputeLocalCoordsInVertexShader(bool value) const {
        if (value) {
            fFlags |= kComputeLocalCoordsInVertexShader_Flag;
        } else {
            fFlags &= ~kComputeLocalCoordsInVertexShader_Flag;
        }
        for (GrCoordTransform* transform : fCoordTransforms) {
            transform->setComputeInVertexShader(value);
        }
        for (const auto& child : fChildProcessors) {
            child->setComputeLocalCoordsInVertexShader(value);
        }
    }

    /**
     * A GrDrawOp may premultiply its antialiasing coverage into its GrGeometryProcessor's color
     * output under the following scenario:
     *   * all the color fragment processors report true to this query,
     *   * all the coverage fragment processors report true to this query,
     *   * the blend mode arithmetic allows for it.
     * To be compatible a fragment processor's output must be a modulation of its input color or
     * alpha with a computed premultiplied color or alpha that is in 0..1 range. The computed color
     * or alpha that is modulated against the input cannot depend on the input's alpha. The computed
     * value cannot depend on the input's color channels unless it unpremultiplies the input color
     * channels by the input alpha.
     */
    bool compatibleWithCoverageAsAlpha() const {
        return SkToBool(fFlags & kCompatibleWithCoverageAsAlpha_OptimizationFlag);
    }

    /**
     * If this is true then all opaque input colors to the processor produce opaque output colors.
     */
    bool preservesOpaqueInput() const {
        return SkToBool(fFlags & kPreservesOpaqueInput_OptimizationFlag);
    }

    /**
     * Tests whether given a constant input color the processor produces a constant output color
     * (for all fragments). If true, outputColor will contain the constant color produced for
     * inputColor.
     */
    bool hasConstantOutputForConstantInput(SkPMColor4f inputColor, SkPMColor4f* outputColor) const {
        if (fFlags & kConstantOutputForConstantInput_OptimizationFlag) {
            *outputColor = this->constantOutputForConstantInput(inputColor);
            return true;
        }
        return false;
    }
    bool hasConstantOutputForConstantInput() const {
        return SkToBool(fFlags & kConstantOutputForConstantInput_OptimizationFlag);
    }
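
    // Illustrative usage sketch: how an optimization pass might exploit the constant-output query
    // ('fp' and 'knownInput' are assumed).
    //
    //     SkPMColor4f constantOutput;
    //     if (fp->hasConstantOutputForConstantInput(knownInput, &constantOutput)) {
    //         // 'fp' can be dropped and 'constantOutput' used in its place.
    //     }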

    /** Returns true if this and other processor conservatively draw identically. It can only return
        true when the two processors are of the same subclass (i.e. they return the same object
        from getFactory()).

        A return value of true from isEqual() should not be used to test whether the processor would
        generate the same shader code. To test for identical code generation use
        getGLSLProcessorKey().
     */
    bool isEqual(const GrFragmentProcessor& that) const;

    /**
     * Pre-order traversal of a FP hierarchy, or of the forest of FPs in a GrPipeline. In the latter
     * case the tree rooted at each FP in the GrPipeline is visited successively.
     */
    class Iter : public SkNoncopyable {
    public:
        explicit Iter(const GrFragmentProcessor* fp) { fFPStack.push_back(fp); }
        explicit Iter(const GrPipeline& pipeline);
        explicit Iter(const GrPaint&);
        const GrFragmentProcessor* next();

    private:
        SkSTArray<4, const GrFragmentProcessor*, true> fFPStack;
    };
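
    // Illustrative usage sketch: pre-order walk over an FP and all of its descendants
    // ('rootFP' is an assumed const GrFragmentProcessor*).
    //
    //     GrFragmentProcessor::Iter iter(rootFP);
    //     while (const GrFragmentProcessor* fp = iter.next()) {
    //         // visit fp
    //     }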

    /**
     * Iterates over all the Ts owned by a GrFragmentProcessor and its children or over all the Ts
     * owned by the forest of GrFragmentProcessors in a GrPipeline. FPs are visited in the same
     * order as Iter and each of an FP's Ts are visited in order.
     */
    template <typename T, int (GrFragmentProcessor::*COUNT)() const,
              const T& (GrFragmentProcessor::*GET)(int) const>
    class FPItemIter : public SkNoncopyable {
    public:
        explicit FPItemIter(const GrFragmentProcessor* fp)
                : fCurrFP(nullptr)
                , fCTIdx(0)
                , fFPIter(fp) {
            fCurrFP = fFPIter.next();
        }
        explicit FPItemIter(const GrPipeline& pipeline)
                : fCurrFP(nullptr)
                , fCTIdx(0)
                , fFPIter(pipeline) {
            fCurrFP = fFPIter.next();
        }

        const T* next() {
            if (!fCurrFP) {
                return nullptr;
            }
            while (fCTIdx == (fCurrFP->*COUNT)()) {
                fCTIdx = 0;
                fCurrFP = fFPIter.next();
                if (!fCurrFP) {
                    return nullptr;
                }
            }
            return &(fCurrFP->*GET)(fCTIdx++);
        }

    private:
        const GrFragmentProcessor* fCurrFP;
        int fCTIdx;
        GrFragmentProcessor::Iter fFPIter;
    };

    using CoordTransformIter = FPItemIter<GrCoordTransform,
                                          &GrFragmentProcessor::numCoordTransforms,
                                          &GrFragmentProcessor::coordTransform>;

    using TextureAccessIter = FPItemIter<TextureSampler,
                                         &GrFragmentProcessor::numTextureSamplers,
                                         &GrFragmentProcessor::textureSampler>;
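
    // Illustrative usage sketch: visit every coord transform owned by the FPs in a pipeline
    // ('pipeline' is an assumed GrPipeline).
    //
    //     GrFragmentProcessor::CoordTransformIter ctIter(pipeline);
    //     while (const GrCoordTransform* ct = ctIter.next()) {
    //         // visit ct
    //     }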

    void visitProxies(const GrOp::VisitProxyFunc& func);

protected:
    enum OptimizationFlags : uint32_t {
        kNone_OptimizationFlags,
        kCompatibleWithCoverageAsAlpha_OptimizationFlag = 0x1,
        kPreservesOpaqueInput_OptimizationFlag = 0x2,
        kConstantOutputForConstantInput_OptimizationFlag = 0x4,
        kAll_OptimizationFlags = kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                                 kPreservesOpaqueInput_OptimizationFlag |
                                 kConstantOutputForConstantInput_OptimizationFlag
    };
    GR_DECL_BITFIELD_OPS_FRIENDS(OptimizationFlags)

    /**
     * Can be used as a helper to decide which fragment processor OptimizationFlags should be set.
     * This assumes that the subclass output color will be a modulation of the input color with a
     * value read from a texture of the passed config and that the texture contains premultiplied
     * color or alpha values that are in range.
     *
     * Since there are multiple ways in which a sampler may have its coordinates clamped or wrapped,
     * callers must determine on their own if the sampling uses a decal strategy in any way, in
     * which case the texture may become transparent regardless of the pixel config.
     */
    static OptimizationFlags ModulateForSamplerOptFlags(GrPixelConfig config, bool samplingDecal) {
        if (samplingDecal) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        } else {
            return ModulateForClampedSamplerOptFlags(config);
        }
    }

    // As above, but callers should somehow ensure or assert their sampler still uses clamping
    static OptimizationFlags ModulateForClampedSamplerOptFlags(GrPixelConfig config) {
        if (GrPixelConfigIsOpaque(config)) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                   kPreservesOpaqueInput_OptimizationFlag;
        } else {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        }
    }
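
    // Illustrative usage sketch: a texture-modulating subclass might derive its flags from its
    // proxy's config when it is constructed ('proxy' and 'usesDecal' are assumed, and
    // GrTextureProxy::config() is assumed to report the GrPixelConfig).
    //
    //     OptimizationFlags flags = ModulateForSamplerOptFlags(proxy->config(), usesDecal);
    //     // ... then forward 'flags' to the GrFragmentProcessor constructor below.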

    GrFragmentProcessor(ClassID classID, OptimizationFlags optimizationFlags)
            : INHERITED(classID)
            , fFlags(optimizationFlags | kComputeLocalCoordsInVertexShader_Flag) {
        SkASSERT((optimizationFlags & ~kAll_OptimizationFlags) == 0);
    }

    OptimizationFlags optimizationFlags() const {
        return static_cast<OptimizationFlags>(kAll_OptimizationFlags & fFlags);
    }

    /** Useful when you can't call fp->optimizationFlags() on a base class object from a subclass. */
    static OptimizationFlags ProcessorOptimizationFlags(const GrFragmentProcessor* fp) {
        return fp->optimizationFlags();
    }

    /**
     * This allows one subclass to access another subclass's implementation of
     * constantOutputForConstantInput. It must only be called when
     * hasConstantOutputForConstantInput() is known to be true.
     */
    static SkPMColor4f ConstantOutputForConstantInput(const GrFragmentProcessor& fp,
                                                      const SkPMColor4f& input) {
        SkASSERT(fp.hasConstantOutputForConstantInput());
        return fp.constantOutputForConstantInput(input);
    }
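
    // Illustrative sketch: a wrapper FP's override (see the private virtual below) can delegate
    // to its child via this helper.
    //
    //     SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& input) const override {
    //         return ConstantOutputForConstantInput(this->childProcessor(0), input);
    //     }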

    /**
     * Fragment Processor subclasses call this from their constructor to register coordinate
     * transformations. Coord transforms provide a mechanism for a processor to receive coordinates
     * in its FS code. The matrix expresses a transformation from local space. For a given
     * fragment the matrix will be applied to the local coordinate that maps to the fragment.
     *
     * When the transformation has perspective, the transformed coordinates will have
     * 3 components. Otherwise they'll have 2.
     *
     * This must only be called from the constructor because GrProcessors are immutable. The
     * processor subclass manages the lifetime of the transformations (this function only stores a
     * pointer). The GrCoordTransform is typically a member field of the GrProcessor subclass.
     *
     * A processor subclass that has multiple methods of construction should always add its coord
     * transforms in a consistent order. The non-virtual implementation of isEqual() automatically
     * compares transforms and will assume they line up across the two processor instances.
     */
    void addCoordTransform(GrCoordTransform*);
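
    // Illustrative sketch: a hypothetical subclass owning a GrCoordTransform member registers it
    // during construction ('MyFP', 'kMyFP_ClassID', and a matrix-only GrCoordTransform
    // constructor are assumptions for the example).
    //
    //     MyFP(const SkMatrix& matrix)
    //             : GrFragmentProcessor(kMyFP_ClassID, kNone_OptimizationFlags)
    //             , fCoordTransform(matrix) {
    //         this->addCoordTransform(&fCoordTransform);
    //     }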

    /**
     * FragmentProcessor subclasses call this from their constructor to register any child
     * FragmentProcessors they have. This must be called AFTER all texture accesses and coord
     * transforms have been added.
     * This is for processors whose shader code will be composed of nested processors whose output
     * colors will be combined somehow to produce the parent's output color. Registering these child
     * processors will allow the ProgramBuilder to automatically handle their transformed coords and
     * texture accesses and mangle their uniform and output color names.
     */
    int registerChildProcessor(std::unique_ptr<GrFragmentProcessor> child);
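
    // Illustrative sketch: a hypothetical wrapper FP ('MyWrapperFP', 'kMyWrapperFP_ClassID')
    // adopts its child after any coord transforms and texture samplers have been added, and can
    // inherit the child's optimization flags.
    //
    //     MyWrapperFP(std::unique_ptr<GrFragmentProcessor> child)
    //             : GrFragmentProcessor(kMyWrapperFP_ClassID,
    //                                   ProcessorOptimizationFlags(child.get())) {
    //         this->registerChildProcessor(std::move(child));
    //     }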

    void setTextureSamplerCnt(int cnt) {
        SkASSERT(cnt >= 0);
        fTextureSamplerCnt = cnt;
    }

    /**
     * Helper for implementing onTextureSampler(). E.g.:
     * return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
     */
    template <typename... Args>
    static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
                                                   const Args&... samps) {
        return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
    }
    inline static const TextureSampler& IthTextureSampler(int i);

private:
    virtual SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& /* inputColor */) const {
        SK_ABORT("Subclass must override this if advertising this optimization.");
    }

    /** Returns a new instance of the appropriate GLSL implementation class
        for the given GrFragmentProcessor; caller is responsible for deleting
        the object. */
    virtual GrGLSLFragmentProcessor* onCreateGLSLInstance() const = 0;

    /** Implemented using GrGLSLFragmentProcessor::GenKey as described in this class's comment. */
    virtual void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;

    /**
     * Subclass implements this to support isEqual(). It will only be called if it is known that
     * the two processors are of the same subclass (i.e. they return the same object from
     * getFactory()). The processor subclass should not compare its coord transforms as that will
     * be performed automatically in the non-virtual isEqual().
     */
    virtual bool onIsEqual(const GrFragmentProcessor&) const = 0;

    virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }

    bool hasSameTransforms(const GrFragmentProcessor&) const;

    enum PrivateFlags {
        kFirstPrivateFlag = kAll_OptimizationFlags + 1,
        kUsesLocalCoords_Flag = kFirstPrivateFlag,
        kComputeLocalCoordsInVertexShader_Flag = kFirstPrivateFlag << 1,
    };

    mutable uint32_t fFlags = kComputeLocalCoordsInVertexShader_Flag;

    int fTextureSamplerCnt = 0;

    SkSTArray<4, GrCoordTransform*, true> fCoordTransforms;

    SkSTArray<1, std::unique_ptr<GrFragmentProcessor>, true> fChildProcessors;

    typedef GrProcessor INHERITED;
};

/**
 * Used to represent a texture that is required by a GrFragmentProcessor. It holds a GrTextureProxy
 * along with an associated GrSamplerState. TextureSamplers don't perform any coord manipulation to
 * account for texture origin.
 */
class GrFragmentProcessor::TextureSampler {
public:
    TextureSampler() = default;

    /**
     * This copy constructor is used by GrFragmentProcessor::clone() implementations. The copy
     * always takes a new ref on the texture proxy as the new fragment processor will not yet be
     * in pending execution state.
     */
    explicit TextureSampler(const TextureSampler& that)
            : fProxy(that.fProxy)
            , fSamplerState(that.fSamplerState) {}

    TextureSampler(sk_sp<GrTextureProxy>, const GrSamplerState&);

    explicit TextureSampler(sk_sp<GrTextureProxy>,
                            GrSamplerState::Filter = GrSamplerState::Filter::kNearest,
                            GrSamplerState::WrapMode wrapXAndY = GrSamplerState::WrapMode::kClamp);

    TextureSampler& operator=(const TextureSampler&) = delete;

    void reset(sk_sp<GrTextureProxy>, const GrSamplerState&);
    void reset(sk_sp<GrTextureProxy>,
               GrSamplerState::Filter = GrSamplerState::Filter::kNearest,
               GrSamplerState::WrapMode wrapXAndY = GrSamplerState::WrapMode::kClamp);
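
    // Illustrative sketch: a hypothetical GrFragmentProcessor subclass ('MyFP', 'kMyFP_ClassID')
    // typically owns a TextureSampler member, initializes it in its constructor, and exposes it
    // through setTextureSamplerCnt()/onTextureSampler().
    //
    //     MyFP(sk_sp<GrTextureProxy> proxy)
    //             : GrFragmentProcessor(kMyFP_ClassID, kNone_OptimizationFlags)
    //             , fSampler(std::move(proxy), GrSamplerState::Filter::kBilerp) {
    //         this->setTextureSamplerCnt(1);
    //     }
    //     const TextureSampler& onTextureSampler(int i) const override {
    //         return IthTextureSampler(i, fSampler);
    //     }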

    bool operator==(const TextureSampler& that) const {
        return this->proxy()->underlyingUniqueID() == that.proxy()->underlyingUniqueID() &&
               fSamplerState == that.fSamplerState;
    }

    bool operator!=(const TextureSampler& other) const { return !(*this == other); }

    SkDEBUGCODE(bool isInstantiated() const { return fProxy->isInstantiated(); })

    // 'peekTexture' should only ever be called after a successful 'instantiate' call
    GrTexture* peekTexture() const {
        SkASSERT(fProxy->isInstantiated());
        return fProxy->peekTexture();
    }

    GrTextureProxy* proxy() const { return fProxy.get(); }
    const GrSamplerState& samplerState() const { return fSamplerState; }
    const GrSwizzle& swizzle() const { return this->proxy()->textureSwizzle(); }

    bool isInitialized() const { return SkToBool(fProxy.get()); }

private:
    sk_sp<GrTextureProxy> fProxy;
    GrSamplerState fSamplerState;
};

//////////////////////////////////////////////////////////////////////////////

const GrFragmentProcessor::TextureSampler& GrFragmentProcessor::IthTextureSampler(int i) {
    SK_ABORT("Illegal texture sampler index");
    static const TextureSampler kBogus;
    return kBogus;
}

GR_MAKE_BITFIELD_OPS(GrFragmentProcessor::OptimizationFlags)

#endif