/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrFragmentProcessor_DEFINED
#define GrFragmentProcessor_DEFINED

#include <tuple>

#include "include/private/SkSLSampleUsage.h"
#include "src/gpu/GrProcessor.h"
#include "src/gpu/ops/GrOp.h"

class GrGLSLFragmentProcessor;
class GrPaint;
class GrPipeline;
class GrProcessorKeyBuilder;
class GrShaderCaps;
class GrSwizzle;
class GrTextureEffect;

/** Provides custom fragment shader code. Fragment processors receive an input color (half4) and
    produce an output color. They may reference textures and uniforms.
 */
class GrFragmentProcessor : public GrProcessor {
public:
    /**
     * In many instances (e.g. SkShader::asFragmentProcessor() implementations) it is desirable to
     * only consider the input color's alpha. However, there is a competing desire to have reusable
     * GrFragmentProcessor subclasses that can be used in other scenarios where the entire input
     * color is considered. This function exists to filter the input color and pass it to a FP. It
     * does so by returning a parent FP that multiplies the passed-in FP's output by the parent's
     * input alpha. The passed-in FP will not receive an input color.
     */
    static std::unique_ptr<GrFragmentProcessor> MulChildByInputAlpha(
            std::unique_ptr<GrFragmentProcessor> child);
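
    // For example (an illustrative sketch; 'shaderFP' stands in for an FP built elsewhere):
    //
    //     std::unique_ptr<GrFragmentProcessor> shaderFP = ...;
    //     shaderFP = GrFragmentProcessor::MulChildByInputAlpha(std::move(shaderFP));
    //     // The wrapped FP no longer sees an input color; its output is multiplied by the
    //     // input alpha.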

    /**
     * Like MulChildByInputAlpha(), but reverses the sense of src and dst. In this case, return
     * the input modulated by the child's alpha. The passed-in FP will not receive an input color.
     *
     * output = input * child.a
     */
    static std::unique_ptr<GrFragmentProcessor> MulInputByChildAlpha(
            std::unique_ptr<GrFragmentProcessor> child);

    /**
     * Returns a fragment processor that generates the passed-in color, modulated by the child's
     * alpha channel. (Pass a null FP to use the alpha from sk_InColor instead of a child FP.)
     */
    static std::unique_ptr<GrFragmentProcessor> ModulateAlpha(
            std::unique_ptr<GrFragmentProcessor> child, const SkPMColor4f& color);

    /**
     * Returns a fragment processor that generates the passed-in color, modulated by the child's
     * RGBA color. (Pass a null FP to use the color from sk_InColor instead of a child FP.)
     */
    static std::unique_ptr<GrFragmentProcessor> ModulateRGBA(
            std::unique_ptr<GrFragmentProcessor> child, const SkPMColor4f& color);
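
    // For example (an illustrative sketch; 'maskFP' is a hypothetical coverage-style child):
    //
    //     auto fp = GrFragmentProcessor::ModulateAlpha(std::move(maskFP),
    //                                                  SkPMColor4f{1, 1, 1, 1});
    //     // Outputs opaque white scaled by maskFP's alpha. With a null child, the alpha would
    //     // come from sk_InColor instead.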
63
64 /**
Brian Salomon22af73f2017-01-26 11:25:12 -050065 * This assumes that the input color to the returned processor will be unpremul and that the
66 * passed processor (which becomes the returned processor's child) produces a premul output.
67 * The result of the returned processor is a premul of its input color modulated by the child
68 * processor's premul output.
bsalomonf1b7a1d2015-09-28 06:26:28 -070069 */
Brian Salomonaff329b2017-08-11 09:40:37 -040070 static std::unique_ptr<GrFragmentProcessor> MakeInputPremulAndMulByOutput(
71 std::unique_ptr<GrFragmentProcessor>);
bsalomonf1b7a1d2015-09-28 06:26:28 -070072
73 /**
bsalomone25eea42015-09-29 06:38:55 -070074 * Returns a parent fragment processor that adopts the passed fragment processor as a child.
75 * The parent will ignore its input color and instead feed the passed in color as input to the
76 * child.
bsalomonf1b7a1d2015-09-28 06:26:28 -070077 */
Brian Salomonaff329b2017-08-11 09:40:37 -040078 static std::unique_ptr<GrFragmentProcessor> OverrideInput(std::unique_ptr<GrFragmentProcessor>,
Brian Salomonc0d79e52019-04-10 15:02:11 -040079 const SkPMColor4f&,
80 bool useUniform = true);
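
    // For example, to evaluate a child with a fixed input color rather than the paint color
    // (an illustrative sketch; 'childFP' is a hypothetical child FP):
    //
    //     auto fp = GrFragmentProcessor::OverrideInput(std::move(childFP),
    //                                                  SkPMColor4f{0, 0, 0, 1});
    //     // childFP always sees opaque black, regardless of what 'fp' itself receives.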

    /**
     * Returns a fragment processor that premuls the input before calling the passed-in fragment
     * processor.
     */
    static std::unique_ptr<GrFragmentProcessor> PremulInput(std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a fragment processor that calls the passed-in fragment processor, and then swizzles
     * the output.
     */
    static std::unique_ptr<GrFragmentProcessor> SwizzleOutput(std::unique_ptr<GrFragmentProcessor>,
                                                              const GrSwizzle&);

    /**
     * Returns a fragment processor that calls the passed-in fragment processor, and then ensures
     * the output is a valid premul color by clamping RGB to [0, A].
     */
    static std::unique_ptr<GrFragmentProcessor> ClampPremulOutput(
            std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns a fragment processor that runs the passed-in array of fragment processors in
     * series. The original input is passed to the first, the first's output is passed to the
     * second, etc. The output of the returned processor is the output of the last processor in
     * the series.
     *
     * The array elements will be moved.
     */
    static std::unique_ptr<GrFragmentProcessor> RunInSeries(std::unique_ptr<GrFragmentProcessor>[],
                                                            int cnt);
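
    // For example (an illustrative sketch; 'gradientFP' and 'adjustFP' are hypothetical FPs):
    //
    //     std::unique_ptr<GrFragmentProcessor> series[] = {std::move(gradientFP),
    //                                                      std::move(adjustFP)};
    //     auto fp = GrFragmentProcessor::RunInSeries(series, 2);
    //     // The gradient's output becomes adjustFP's input, and fp outputs adjustFP's result.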

    /**
     * Makes a copy of this fragment processor that draws equivalently to the original.
     * If the processor has child processors they are cloned as well.
     */
    virtual std::unique_ptr<GrFragmentProcessor> clone() const = 0;

    // The FP that this FP was registered with as a child. This will be null if this FP is a root.
    const GrFragmentProcessor* parent() const { return fParent; }

    GrGLSLFragmentProcessor* createGLSLInstance() const;

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const {
        this->onGetGLSLProcessorKey(caps, b);
        for (const auto& child : fChildProcessors) {
            if (child) {
                child->getGLSLProcessorKey(caps, b);
            }
        }
    }

    int numVaryingCoordsUsed() const { return this->usesVaryingCoordsDirectly() ? 1 : 0; }

    int numChildProcessors() const { return fChildProcessors.count(); }
    int numNonNullChildProcessors() const;

    GrFragmentProcessor* childProcessor(int index) { return fChildProcessors[index].get(); }
    const GrFragmentProcessor* childProcessor(int index) const {
        return fChildProcessors[index].get();
    }

    SkDEBUGCODE(bool isInstantiated() const;)

    /**
     * Does this FP require local coordinates to be produced by the primitive processor? This only
     * returns true if this FP will directly read those local coordinates. FPs that are sampled
     * explicitly do not require primitive-generated local coordinates (because the sample
     * coordinates are supplied by the parent FP).
     *
     * If the root of an FP tree does not provide explicit coordinates, the geometry processor
     * provides the original local coordinates to start. This may be implicit as part of vertex
     * shader-lifted varyings, or by providing the base local coordinate to the fragment shader.
     */
    bool usesVaryingCoordsDirectly() const {
        return SkToBool(fFlags & kUsesSampleCoordsDirectly_Flag) &&
               !SkToBool(fFlags & kSampledWithExplicitCoords_Flag);
    }

    /**
     * Do any of the FPs in this tree require local coordinates to be produced by the primitive
     * processor? This can return true even if this FP does not refer to sample coordinates
     * itself, as long as a descendant FP uses them.
     */
    bool usesVaryingCoords() const {
        return (SkToBool(fFlags & kUsesSampleCoordsDirectly_Flag) ||
                SkToBool(fFlags & kUsesSampleCoordsIndirectly_Flag)) &&
               !SkToBool(fFlags & kSampledWithExplicitCoords_Flag);
    }

    /**
     * True if this FP refers directly to the sample coordinate parameter of its function
     * (e.g. uses EmitArgs::fSampleCoord in emitCode()). This also returns true if the
     * coordinate reference comes from autogenerated code invoking 'sample(matrix)' expressions.
     *
     * Unlike usesVaryingCoords(), this can return true whether or not the FP is explicitly
     * sampled, and does not change based on how the FP is composed. This property is specific to
     * the FP's function and not the entire program.
     */
    bool referencesSampleCoords() const {
        return SkToBool(fFlags & kUsesSampleCoordsDirectly_Flag);
    }

    // True if this FP's parent invokes it with 'sample(float2)' or a variable 'sample(matrix)'.
    bool isSampledWithExplicitCoords() const {
        return SkToBool(fFlags & kSampledWithExplicitCoords_Flag);
    }

    // True if the transform chain from the root to this FP introduces perspective into the local
    // coordinate expression.
    bool hasPerspectiveTransform() const {
        return SkToBool(fFlags & kNetTransformHasPerspective_Flag);
    }

    // The SampleUsage describing how this FP is invoked by its parent using 'sample(matrix)'.
    // This only reflects the immediate sampling from parent to this FP.
    const SkSL::SampleUsage& sampleUsage() const {
        return fUsage;
    }

    /**
     * A GrDrawOp may premultiply its antialiasing coverage into its GrGeometryProcessor's color
     * output under the following scenario:
     *   * all the color fragment processors report true to this query,
     *   * all the coverage fragment processors report true to this query,
     *   * the blend mode arithmetic allows for it.
     * To be compatible a fragment processor's output must be a modulation of its input color or
     * alpha with a computed premultiplied color or alpha that is in the 0..1 range. The computed
     * color or alpha that is modulated against the input cannot depend on the input's alpha. The
     * computed value cannot depend on the input's color channels unless it unpremultiplies the
     * input color channels by the input alpha.
     */
    bool compatibleWithCoverageAsAlpha() const {
        return SkToBool(fFlags & kCompatibleWithCoverageAsAlpha_OptimizationFlag);
    }

    /**
     * If this is true then all opaque input colors to the processor produce opaque output colors.
     */
    bool preservesOpaqueInput() const {
        return SkToBool(fFlags & kPreservesOpaqueInput_OptimizationFlag);
    }

    /**
     * Tests whether, given a constant input color, the processor produces a constant output color
     * (for all fragments). If true, outputColor will contain the constant color produced for
     * inputColor.
     */
    bool hasConstantOutputForConstantInput(SkPMColor4f inputColor, SkPMColor4f* outputColor) const {
        if (fFlags & kConstantOutputForConstantInput_OptimizationFlag) {
            *outputColor = this->constantOutputForConstantInput(inputColor);
            return true;
        }
        return false;
    }
    bool hasConstantOutputForConstantInput() const {
        return SkToBool(fFlags & kConstantOutputForConstantInput_OptimizationFlag);
    }
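
    // For example, a caller can try to fold a known input color through this FP at record time
    // (an illustrative sketch; 'fp' is a hypothetical processor):
    //
    //     SkPMColor4f out;
    //     if (fp->hasConstantOutputForConstantInput(SkPMColor4f{1, 1, 1, 1}, &out)) {
    //         // Every fragment would receive 'out', so shading can be simplified for this draw.
    //     }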

    /** Returns true if this and the other processor conservatively draw identically. It can only
        return true when the two processors are of the same subclass (i.e. they return the same
        object from getFactory()).

        A return value of true from isEqual() should not be used to test whether the processors
        would generate the same shader code. To test for identical code generation use
        getGLSLProcessorKey().
     */
    bool isEqual(const GrFragmentProcessor& that) const;

    void visitProxies(const GrOp::VisitProxyFunc& func) const;

    void visitTextureEffects(const std::function<void(const GrTextureEffect&)>&) const;

    GrTextureEffect* asTextureEffect();
    const GrTextureEffect* asTextureEffect() const;

    // A pre-order traversal iterator over a hierarchy of FPs. It can also iterate over all the FP
    // hierarchies rooted in a GrPaint, GrProcessorSet, or GrPipeline. For these collections it
    // iterates the tree rooted at each color FP and then each coverage FP.
    //
    // An iterator is constructed from one of the srcs and used like this:
    //   for (GrFragmentProcessor::CIter iter(pipeline); iter; ++iter) {
    //       const GrFragmentProcessor& fp = *iter;
    //   }
    // The exit test for the loop uses CIter's operator bool().
    // To use a range-for loop instead see CIterRange below.
    class CIter;

    // Used to implement a range-for loop using CIter. Src is one of GrFragmentProcessor,
    // GrPaint, GrProcessorSet, or GrPipeline. Type aliases for these are defined below.
    // Example usage:
    //   for (const auto& fp : GrFragmentProcessor::PaintRange(paint)) {
    //       if (fp.usesLocalCoords()) {
    //           ...
    //       }
    //   }
    template <typename Src> class CIterRange;

    // We would use template deduction guides for CIter but for:
    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79501
    // Instead we use these specialized type aliases to make it prettier
    // to construct CIters for particular sources of FPs.
    using FPRange = CIterRange<GrFragmentProcessor>;
    using PaintRange = CIterRange<GrPaint>;

    // Sentinel type for range-for using CIter.
    class EndCIter {};

protected:
    enum OptimizationFlags : uint32_t {
        kNone_OptimizationFlags,
        kCompatibleWithCoverageAsAlpha_OptimizationFlag = 0x1,
        kPreservesOpaqueInput_OptimizationFlag = 0x2,
        kConstantOutputForConstantInput_OptimizationFlag = 0x4,
        kAll_OptimizationFlags = kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                                 kPreservesOpaqueInput_OptimizationFlag |
                                 kConstantOutputForConstantInput_OptimizationFlag
    };
    GR_DECL_BITFIELD_OPS_FRIENDS(OptimizationFlags)

    /**
     * Can be used as a helper to decide which fragment processor OptimizationFlags should be set.
     * This assumes that the subclass output color will be a modulation of the input color with a
     * value read from a texture of the passed color type and that the texture contains
     * premultiplied color or alpha values that are in range.
     *
     * Since there are multiple ways in which a sampler may have its coordinates clamped or
     * wrapped, callers must determine on their own if the sampling uses a decal strategy in any
     * way, in which case the texture may become transparent regardless of the color type.
     */
    static OptimizationFlags ModulateForSamplerOptFlags(SkAlphaType alphaType, bool samplingDecal) {
        if (samplingDecal) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        } else {
            return ModulateForClampedSamplerOptFlags(alphaType);
        }
    }

    // As above, but callers should somehow ensure or assert their sampler still uses clamping
    static OptimizationFlags ModulateForClampedSamplerOptFlags(SkAlphaType alphaType) {
        if (alphaType == kOpaque_SkAlphaType) {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag |
                   kPreservesOpaqueInput_OptimizationFlag;
        } else {
            return kCompatibleWithCoverageAsAlpha_OptimizationFlag;
        }
    }
327
Ethan Nicholasabff9562017-10-09 10:54:08 -0400328 GrFragmentProcessor(ClassID classID, OptimizationFlags optimizationFlags)
Brian Salomonb10a6622020-02-20 13:36:01 -0500329 : INHERITED(classID), fFlags(optimizationFlags) {
Ethan Nicholasd4efe682019-08-29 16:10:13 -0400330 SkASSERT((optimizationFlags & ~kAll_OptimizationFlags) == 0);
Brian Salomon587e08f2017-01-27 10:59:27 -0500331 }
332
333 OptimizationFlags optimizationFlags() const {
334 return static_cast<OptimizationFlags>(kAll_OptimizationFlags & fFlags);
335 }
336
Brian Salomonc0d79e52019-04-10 15:02:11 -0400337 /** Useful when you can't call fp->optimizationFlags() on a base class object from a subclass.*/
338 static OptimizationFlags ProcessorOptimizationFlags(const GrFragmentProcessor* fp) {
Brian Osman12c5d292020-07-13 16:11:35 -0400339 return fp ? fp->optimizationFlags() : kAll_OptimizationFlags;
Brian Salomonc0d79e52019-04-10 15:02:11 -0400340 }
341
Brian Salomon587e08f2017-01-27 10:59:27 -0500342 /**
343 * This allows one subclass to access another subclass's implementation of
344 * constantOutputForConstantInput. It must only be called when
345 * hasConstantOutputForConstantInput() is known to be true.
346 */
Brian Osman12c5d292020-07-13 16:11:35 -0400347 static SkPMColor4f ConstantOutputForConstantInput(const GrFragmentProcessor* fp,
Brian Osman1d5b5982018-10-01 13:41:39 -0400348 const SkPMColor4f& input) {
Brian Osman12c5d292020-07-13 16:11:35 -0400349 if (fp) {
350 SkASSERT(fp->hasConstantOutputForConstantInput());
351 return fp->constantOutputForConstantInput(input);
352 } else {
353 return input;
354 }
Brian Salomon587e08f2017-01-27 10:59:27 -0500355 }
356
    /**
     * FragmentProcessor subclasses call this from their constructor to register any child
     * FragmentProcessors they have. This must be called AFTER all texture accesses and coord
     * transforms have been added.
     * This is for processors whose shader code will be composed of nested processors whose output
     * colors will be combined somehow to produce the parent's output color. Registering these
     * child processors allows the ProgramBuilder to automatically handle their transformed coords
     * and texture accesses and to mangle their uniform and output color names.
     *
     * The SampleUsage parameter describes all of the ways that the child is sampled by the parent.
     */
    void registerChild(std::unique_ptr<GrFragmentProcessor> child,
                       SkSL::SampleUsage sampleUsage = SkSL::SampleUsage::PassThrough());
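
    // For example, a hypothetical wrapper effect might adopt its child in its constructor
    // (an illustrative sketch, not an actual Skia effect):
    //
    //     MyWrapperFP(std::unique_ptr<GrFragmentProcessor> child)
    //             : INHERITED(kMyWrapperFP_ClassID, ProcessorOptimizationFlags(child.get())) {
    //         this->registerChild(std::move(child), SkSL::SampleUsage::PassThrough());
    //     }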

    /**
     * This method takes an existing fragment processor, clones all of its children, and registers
     * the clones as children of this fragment processor.
     */
    void cloneAndRegisterAllChildProcessors(const GrFragmentProcessor& src);

    // FP implementations must call this function if their matching GrGLSLFragmentProcessor's
    // emitCode() function uses the EmitArgs::fSampleCoord variable in generated SkSL.
    void setUsesSampleCoordsDirectly() {
        fFlags |= kUsesSampleCoordsDirectly_Flag;
    }

private:
    virtual SkPMColor4f constantOutputForConstantInput(const SkPMColor4f& /* inputColor */) const {
        SK_ABORT("Subclass must override this if advertising this optimization.");
    }

    /** Returns a new instance of the appropriate *GL* implementation class
        for the given GrFragmentProcessor; caller is responsible for deleting
        the object. */
    virtual GrGLSLFragmentProcessor* onCreateGLSLInstance() const = 0;

    /** Implemented using GLFragmentProcessor::GenKey as described in this class's comment. */
    virtual void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;

    /**
     * Subclass implements this to support isEqual(). It will only be called if it is known that
     * the two processors are of the same subclass (i.e. they return the same object from
     * getFactory()).
     */
    virtual bool onIsEqual(const GrFragmentProcessor&) const = 0;

    enum PrivateFlags {
        kFirstPrivateFlag = kAll_OptimizationFlags + 1,

        // Propagates up the FP tree to the root.
        kUsesSampleCoordsIndirectly_Flag = kFirstPrivateFlag,

        // Does not propagate at all.
        kUsesSampleCoordsDirectly_Flag = kFirstPrivateFlag << 1,

        // Propagates down the FP tree to all of its leaves.
        kSampledWithExplicitCoords_Flag = kFirstPrivateFlag << 2,
        kNetTransformHasPerspective_Flag = kFirstPrivateFlag << 3,
    };
    void addAndPushFlagToChildren(PrivateFlags flag);

    SkSTArray<1, std::unique_ptr<GrFragmentProcessor>, true> fChildProcessors;
    const GrFragmentProcessor* fParent = nullptr;
    uint32_t fFlags = 0;
    SkSL::SampleUsage fUsage;

    typedef GrProcessor INHERITED;
};

//////////////////////////////////////////////////////////////////////////////

GR_MAKE_BITFIELD_OPS(GrFragmentProcessor::OptimizationFlags)

//////////////////////////////////////////////////////////////////////////////

class GrFragmentProcessor::CIter {
public:
    explicit CIter(const GrFragmentProcessor& fp) { fFPStack.push_back(&fp); }
    explicit CIter(const GrPaint&);
    explicit CIter(const GrPipeline&);

    const GrFragmentProcessor& operator*() const { return *fFPStack.back(); }
    const GrFragmentProcessor* operator->() const { return fFPStack.back(); }

    CIter& operator++();

    operator bool() const { return !fFPStack.empty(); }

    bool operator!=(const EndCIter&) { return (bool)*this; }

    // Hopefully this does not actually get called because of RVO.
    CIter(const CIter&) = default;

    // Because each iterator carries a stack we want to avoid copies.
    CIter& operator=(const CIter&) = delete;

protected:
    CIter() = delete;

    SkSTArray<4, const GrFragmentProcessor*, true> fFPStack;
};

//////////////////////////////////////////////////////////////////////////////

template <typename Src> class GrFragmentProcessor::CIterRange {
public:
    explicit CIterRange(const Src& t) : fT(t) {}
    CIter begin() const { return CIter(fT); }
    EndCIter end() const { return EndCIter(); }

private:
    const Src& fT;
};

/**
 * Some fragment-processor creation methods have preconditions that might not be satisfied by the
 * calling code. Those methods can return a `GrFPResult` instead of a bare fragment processor. If
 * creation succeeds, the new fragment processor is returned and `success` is true. If a
 * precondition is not met, `success` is set to false and the input FP is returned unchanged.
 */
using GrFPResult = std::tuple<bool /*success*/, std::unique_ptr<GrFragmentProcessor>>;
static inline GrFPResult GrFPFailure(std::unique_ptr<GrFragmentProcessor> fp) {
    return {false, std::move(fp)};
}
static inline GrFPResult GrFPSuccess(std::unique_ptr<GrFragmentProcessor> fp) {
    return {true, std::move(fp)};
}
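
// For example, a factory with a precondition could be written as follows (an illustrative
// sketch; 'MakeWithOpaqueInput' is a hypothetical factory, not part of this header):
//
//     GrFPResult MakeWithOpaqueInput(std::unique_ptr<GrFragmentProcessor> fp, SkAlphaType at) {
//         if (at != kOpaque_SkAlphaType) {
//             return GrFPFailure(std::move(fp));  // precondition not met; hand the FP back
//         }
//         return GrFPSuccess(std::move(fp));      // a real factory might wrap or modify fp here
//     }
//
//     // Callers typically unpack the result with structured bindings:
//     auto [success, resultFP] = MakeWithOpaqueInput(std::move(fp), alphaType);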

#endif