blob: 1243a6bd2304a45778ea0a0e02a5fad6bc572515 [file] [log] [blame]
Brian Salomon34169692017-08-28 15:32:01 -04001/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Brian Salomond7065e72018-10-12 11:42:02 -04008#include <new>
Brian Salomonf19f9ca2019-09-18 15:54:26 -04009
Mike Kleinc0bd9f92019-04-23 12:05:21 -050010#include "include/core/SkPoint.h"
11#include "include/core/SkPoint3.h"
12#include "include/gpu/GrTexture.h"
13#include "include/private/GrRecordingContext.h"
Michael Ludwig22429f92019-06-27 10:44:48 -040014#include "include/private/SkFloatingPoint.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050015#include "include/private/SkTo.h"
16#include "src/core/SkMathPriv.h"
17#include "src/core/SkMatrixPriv.h"
18#include "src/core/SkRectPriv.h"
19#include "src/gpu/GrAppliedClip.h"
20#include "src/gpu/GrCaps.h"
21#include "src/gpu/GrDrawOpTest.h"
22#include "src/gpu/GrGeometryProcessor.h"
23#include "src/gpu/GrGpu.h"
24#include "src/gpu/GrMemoryPool.h"
25#include "src/gpu/GrOpFlushState.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050026#include "src/gpu/GrRecordingContextPriv.h"
27#include "src/gpu/GrResourceProvider.h"
28#include "src/gpu/GrResourceProviderPriv.h"
29#include "src/gpu/GrShaderCaps.h"
30#include "src/gpu/GrTexturePriv.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040031#include "src/gpu/GrTextureProxy.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050032#include "src/gpu/SkGr.h"
Michael Ludwig22429f92019-06-27 10:44:48 -040033#include "src/gpu/effects/GrTextureDomain.h"
Brian Salomonf19f9ca2019-09-18 15:54:26 -040034#include "src/gpu/effects/generated/GrSaturateProcessor.h"
Michael Ludwigfd4f4df2019-05-29 09:51:09 -040035#include "src/gpu/geometry/GrQuad.h"
Michael Ludwig425eb452019-06-27 10:13:27 -040036#include "src/gpu/geometry/GrQuadBuffer.h"
Michael Ludwig0f809022019-06-04 09:14:37 -040037#include "src/gpu/geometry/GrQuadUtils.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050038#include "src/gpu/glsl/GrGLSLVarying.h"
Michael Ludwig22429f92019-06-27 10:44:48 -040039#include "src/gpu/ops/GrFillRectOp.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050040#include "src/gpu/ops/GrMeshDrawOp.h"
41#include "src/gpu/ops/GrQuadPerEdgeAA.h"
Robert Phillips3968fcb2019-12-05 16:40:31 -050042#include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
Brian Salomonf19f9ca2019-09-18 15:54:26 -040043#include "src/gpu/ops/GrTextureOp.h"
Brian Salomon34169692017-08-28 15:32:01 -040044
45namespace {
46
Michael Ludwig460eb5e2018-10-29 11:09:29 -040047using Domain = GrQuadPerEdgeAA::Domain;
Michael Ludwigc182b942018-11-16 10:27:51 -050048using VertexSpec = GrQuadPerEdgeAA::VertexSpec;
Brian Osman3d139a42018-11-19 10:42:10 -050049using ColorType = GrQuadPerEdgeAA::ColorType;
Brian Salomonb80ffee2018-05-23 16:39:39 -040050
Michael Ludwig22429f92019-06-27 10:44:48 -040051// Extracts lengths of vertical and horizontal edges of axis-aligned quad. "width" is the edge
52// between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3).
53static SkSize axis_aligned_quad_size(const GrQuad& quad) {
54 SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
55 // Simplification of regular edge length equation, since it's axis aligned and can avoid sqrt
56 float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0));
57 float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0));
58 return {dw, dh};
59}
60
61static bool filter_has_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
62 // If not axis-aligned in src or dst, then always say it has an effect
63 if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
64 dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
65 return true;
66 }
67
68 SkRect srcRect;
69 SkRect dstRect;
70 if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
71 // Disable filtering when there is no scaling (width and height are the same), and the
72 // top-left corners have the same fraction (so src and dst snap to the pixel grid
73 // identically).
74 SkASSERT(srcRect.isSorted());
75 return srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
76 SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
77 SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
78 } else {
79 // Although the quads are axis-aligned, the local coordinate system is transformed such
80 // that fractionally-aligned sample centers will not align with the device coordinate system
81 // So disable filtering when edges are the same length and both srcQuad and dstQuad
82 // 0th vertex is integer aligned.
83 if (SkScalarIsInt(srcQuad.x(0)) && SkScalarIsInt(srcQuad.y(0)) &&
84 SkScalarIsInt(dstQuad.x(0)) && SkScalarIsInt(dstQuad.y(0))) {
85 // Extract edge lengths
86 SkSize srcSize = axis_aligned_quad_size(srcQuad);
87 SkSize dstSize = axis_aligned_quad_size(dstQuad);
88 return srcSize.fWidth != dstSize.fWidth || srcSize.fHeight != dstSize.fHeight;
89 } else {
90 return true;
91 }
92 }
93}
94
// Describes the function for normalizing src coords: [x * iw, y * ih + yOffset] can represent
// regular and rectangular textures, with or without origin correction.
struct NormalizationParams {
    float fIW;      // 1 / width of texture, or 1.0 for texture rectangles
    float fIH;      // 1 / height of texture, or 1.0 for tex rects, multiplied by -1 if
                    // bottom-left origin
    float fYOffset; // 0 for top-left origin, height of [normalized] tex if bottom-left
};
Michael Ludwigadb12e72019-12-04 16:19:18 -0500102static NormalizationParams proxy_normalization_params(const GrSurfaceProxy* proxy,
103 GrSurfaceOrigin origin) {
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500104 // Whether or not the proxy is instantiated, this is the size its texture will be, so we can
105 // normalize the src coordinates up front.
Michael Ludwigadb12e72019-12-04 16:19:18 -0500106 SkISize dimensions = proxy->backingStoreDimensions();
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500107 float iw, ih, h;
Michael Ludwigadb12e72019-12-04 16:19:18 -0500108 if (proxy->backendFormat().textureType() == GrTextureType::kRectangle) {
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500109 iw = ih = 1.f;
110 h = dimensions.height();
111 } else {
112 iw = 1.f / dimensions.width();
113 ih = 1.f / dimensions.height();
114 h = 1.f;
115 }
116
Michael Ludwigadb12e72019-12-04 16:19:18 -0500117 if (origin == kBottomLeft_GrSurfaceOrigin) {
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500118 return {iw, -ih, h};
119 } else {
120 return {iw, ih, 0.0f};
121 }
122}
123
124static void correct_domain_for_bilerp(const NormalizationParams& params,
125 SkRect* domainRect) {
126 // Normalized pixel size is also equal to iw and ih, so the insets for bilerp are just
127 // in those units and can be applied safely after normalization. However, if the domain is
128 // smaller than a texel, it should clamp to the center of that axis.
129 float dw = domainRect->width() < params.fIW ? domainRect->width() : params.fIW;
130 float dh = domainRect->height() < params.fIH ? domainRect->height() : params.fIH;
131 domainRect->inset(0.5f * dw, 0.5f * dh);
132}
133
134// Normalize the domain and inset for bilerp as necessary. If 'domainRect' is null, it is assumed
135// no domain constraint is desired, so a sufficiently large rect is returned even if the quad
136// ends up batched with an op that uses domains overall.
137static SkRect normalize_domain(GrSamplerState::Filter filter,
138 const NormalizationParams& params,
139 const SkRect* domainRect) {
Brian Salomon246bc3d2018-12-06 15:33:02 -0500140 static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500141 if (!domainRect) {
Michael Ludwig460eb5e2018-10-29 11:09:29 -0400142 // Either the quad has no domain constraint and is batched with a domain constrained op
143 // (in which case we want a domain that doesn't restrict normalized tex coords), or the
144 // entire op doesn't use the domain, in which case the returned value is ignored.
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500145 return kLargeRect;
Michael Ludwig460eb5e2018-10-29 11:09:29 -0400146 }
147
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500148 auto ltrb = skvx::Vec<4, float>::Load(domainRect);
149 // Normalize and offset
150 ltrb = mad(ltrb, {params.fIW, params.fIH, params.fIW, params.fIH},
151 {0.f, params.fYOffset, 0.f, params.fYOffset});
152 if (params.fIH < 0.f) {
153 // Flip top and bottom to keep the rect sorted when loaded back to SkRect.
154 ltrb = skvx::shuffle<0, 3, 2, 1>(ltrb);
Michael Ludwig460eb5e2018-10-29 11:09:29 -0400155 }
156
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500157 SkRect out;
158 ltrb.store(&out);
159
160 if (filter != GrSamplerState::Filter::kNearest) {
161 correct_domain_for_bilerp(params, &out);
162 }
163 return out;
Michael Ludwig460eb5e2018-10-29 11:09:29 -0400164}
165
Michael Ludwig009b92e2019-02-15 16:03:53 -0500166// Normalizes logical src coords and corrects for origin
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500167static void normalize_src_quad(const NormalizationParams& params,
168 GrQuad* srcQuad) {
Michael Ludwig009b92e2019-02-15 16:03:53 -0500169 // The src quad should not have any perspective
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500170 SkASSERT(!srcQuad->hasPerspective());
171 skvx::Vec<4, float> xs = srcQuad->x4f() * params.fIW;
172 skvx::Vec<4, float> ys = mad(srcQuad->y4f(), params.fIH, params.fYOffset);
173 xs.store(srcQuad->xs());
174 ys.store(srcQuad->ys());
Michael Ludwig009b92e2019-02-15 16:03:53 -0500175}
Michael Ludwig460eb5e2018-10-29 11:09:29 -0400176
Michael Ludwig379e4962019-12-06 13:21:26 -0500177// Count the number of proxy runs in the entry set. This usually is already computed by
178// SkGpuDevice, but when the BatchLengthLimiter chops the set up it must determine a new proxy count
179// for each split.
180static int proxy_run_count(const GrRenderTargetContext::TextureSetEntry set[], int count) {
181 int actualProxyRunCount = 0;
182 const GrSurfaceProxy* lastProxy = nullptr;
183 for (int i = 0; i < count; ++i) {
184 if (set[i].fProxyView.proxy() != lastProxy) {
185 actualProxyRunCount++;
186 lastProxy = set[i].fProxyView.proxy();
187 }
188 }
189 return actualProxyRunCount;
190}
191
/**
 * Op that implements GrTextureOp::Make. It draws textured quads, each of which can modulate the
 * texture's color. The blend with the destination is always src-over. Per-edge anti-aliasing is
 * controlled per quad via GrQuadAAFlags.
 */
196class TextureOp final : public GrMeshDrawOp {
197public:
    // Creates a TextureOp for a single textured quad. 'deviceQuad' is the geometry in device
    // space, 'localQuad' the texture coordinates; 'domain', when non-null, applies the strict
    // src-rect constraint. The op is allocated from the context's op memory pool.
    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          GrSurfaceProxyView proxyView,
                                          sk_sp<GrColorSpaceXform> textureXform,
                                          GrSamplerState::Filter filter,
                                          const SkPMColor4f& color,
                                          GrTextureOp::Saturate saturate,
                                          GrAAType aaType,
                                          GrQuadAAFlags aaFlags,
                                          const GrQuad& deviceQuad,
                                          const GrQuad& localQuad,
                                          const SkRect* domain) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();
        return pool->allocate<TextureOp>(std::move(proxyView), std::move(textureXform), filter,
                                         color, saturate, aaType, aaFlags, deviceQuad, localQuad,
                                         domain);
    }
Robert Phillipse837e612019-11-15 11:02:50 -0500214
    // Creates a batched TextureOp over 'cnt' entries. The op is over-allocated so the trailing
    // ViewCountPairs (one per run of consecutive entries sharing a proxy) live inline after it.
    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          GrRenderTargetContext::TextureSetEntry set[],
                                          int cnt,
                                          int proxyRunCnt,
                                          GrSamplerState::Filter filter,
                                          GrTextureOp::Saturate saturate,
                                          GrAAType aaType,
                                          SkCanvas::SrcRectConstraint constraint,
                                          const SkMatrix& viewMatrix,
                                          sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
        // Allocate size based on proxyRunCnt, since that determines number of ViewCountPairs.
        SkASSERT(proxyRunCnt <= cnt);

        // One ViewCountPair is already a member of TextureOp, hence (proxyRunCnt - 1) extras.
        size_t size = sizeof(TextureOp) + sizeof(ViewCountPair) * (proxyRunCnt - 1);
        GrOpMemoryPool* pool = context->priv().opMemoryPool();
        void* mem = pool->allocate(size);
        return std::unique_ptr<GrDrawOp>(
                new (mem) TextureOp(set, cnt, proxyRunCnt, filter, saturate, aaType, constraint,
                                    viewMatrix, std::move(textureColorSpaceXform)));
    }
Brian Salomon34169692017-08-28 15:32:01 -0400235
    ~TextureOp() override {
        // Start at p = 1: fViewCountPairs[0] is a regular member and is destroyed
        // automatically, while the trailing pairs were placement-newed into the extra memory
        // allocated after the op and must be destroyed manually.
        for (unsigned p = 1; p < fMetadata.fProxyCount; ++p) {
            fViewCountPairs[p].~ViewCountPair();
        }
    }

    const char* name() const override { return "TextureOp"; }

    // Reports every proxy the op samples, tagging each with whether mip maps are needed based
    // on the op-wide filter.
    void visitProxies(const VisitProxyFunc& func) const override {
        bool mipped = (GrSamplerState::Filter::kMipMap == fMetadata.filter());
        for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
            func(fViewCountPairs[p].fProxy.get(), GrMipMapped(mipped));
        }
    }
Robert Phillipsb493eeb2017-09-13 13:10:52 -0400250
#ifdef SK_DEBUG
    // Debug-only dump of every quad in the op: color, domain, local (UV) coords, and device
    // coords. Fix: the argument order previously fed the device-quad points to the "UVs" row
    // and the uv points to the "Quad" row; arguments now match their labels.
    SkString dumpInfo() const override {
        SkString str;
        str.appendf("# draws: %d\n", fQuads.count());
        auto iter = fQuads.iterator();
        for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
            str.appendf("Proxy ID: %d, Filter: %d\n",
                        fViewCountPairs[p].fProxy->uniqueID().asUInt(),
                        static_cast<int>(fMetadata.fFilter));
            int i = 0;
            while(i < fViewCountPairs[p].fQuadCnt && iter.next()) {
                const GrQuad* quad = iter.deviceQuad();
                GrQuad uv = iter.isLocalValid() ? *(iter.localQuad()) : GrQuad();
                const ColorDomainAndAA& info = iter.metadata();
                str.appendf(
                        "%d: Color: 0x%08x, Domain(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
                        " UVs [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
                        " Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
                        i, info.fColor.toBytes_RGBA(), fMetadata.fDomain, info.fDomainRect.fLeft,
                        info.fDomainRect.fTop, info.fDomainRect.fRight, info.fDomainRect.fBottom,
                        uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
                        uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY,
                        quad->point(0).fX, quad->point(0).fY, quad->point(1).fX, quad->point(1).fY,
                        quad->point(2).fX, quad->point(2).fY, quad->point(3).fX, quad->point(3).fY);

                i++;
            }
        }
        str += INHERITED::dumpInfo();
        return str;
    }
#endif
Brian Salomon34169692017-08-28 15:32:01 -0400283
Brian Osman5ced0bf2019-03-15 10:15:29 -0400284 GrProcessorSet::Analysis finalize(
Chris Dalton6ce447a2019-06-23 18:07:38 -0600285 const GrCaps& caps, const GrAppliedClip*, bool hasMixedSampledCoverage,
286 GrClampType clampType) override {
Michael Ludwigadb12e72019-12-04 16:19:18 -0500287 SkASSERT(fMetadata.colorType() == ColorType::kNone);
Michael Ludwig425eb452019-06-27 10:13:27 -0400288 auto iter = fQuads.metadata();
289 while(iter.next()) {
290 auto colorType = GrQuadPerEdgeAA::MinColorType(iter->fColor, clampType, caps);
Michael Ludwig4384f042019-12-05 10:30:35 -0500291 fMetadata.fColorType = SkTMax(fMetadata.fColorType, static_cast<uint16_t>(colorType));
Brian Osman8fa7ab42019-03-18 10:22:42 -0400292 }
Chris Dalton4b62aed2019-01-15 11:53:00 -0700293 return GrProcessorSet::EmptySetAnalysis();
Brian Salomon34169692017-08-28 15:32:01 -0400294 }
295
Brian Salomon485b8c62018-01-12 15:11:06 -0500296 FixedFunctionFlags fixedFunctionFlags() const override {
Michael Ludwigadb12e72019-12-04 16:19:18 -0500297 return fMetadata.aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
298 : FixedFunctionFlags::kNone;
Brian Salomon485b8c62018-01-12 15:11:06 -0500299 }
Brian Salomon34169692017-08-28 15:32:01 -0400300
301 DEFINE_OP_CLASS_ID
302
303private:
Robert Phillips7c525e62018-06-12 10:11:12 -0400304 friend class ::GrOpMemoryPool;
Brian Salomon762d5e72017-12-01 10:25:08 -0500305
    // Per-quad state stored in the GrQuadBuffer alongside the device/local quads.
    struct ColorDomainAndAA {
        ColorDomainAndAA(const SkPMColor4f& color, const SkRect& domainRect, GrQuadAAFlags aaFlags)
                : fColor(color)
                , fDomainRect(domainRect)
                , fAAFlags(static_cast<uint16_t>(aaFlags)) {
            // The 4-bit field must round-trip the full flag set without loss.
            SkASSERT(fAAFlags == static_cast<uint16_t>(aaFlags));
        }

        SkPMColor4f fColor;
        // If the op doesn't use domains, this is ignored. If the op uses domains and the specific
        // entry does not, this rect will equal kLargeRect, so it automatically has no effect.
        SkRect fDomainRect;
        unsigned fAAFlags : 4;

        GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
    };
Michael Ludwigadb12e72019-12-04 16:19:18 -0500322
    struct ViewCountPair {
        // Normally this would be a GrSurfaceProxyView, but GrTextureOp applies the GrOrigin right
        // away so it doesn't need to be stored, and all ViewCountPairs in an op have the same
        // swizzle so that is stored in the op metadata.
        sk_sp<GrSurfaceProxy> fProxy;
        // Number of consecutive quads in the op that sample from fProxy.
        int fQuadCnt;
    };
330
    // TextureOp and ViewCountPair are 8 byte aligned. This is packed into 8 bytes to minimally
    // increase the size of the op; increasing the op size can have a surprising impact on
    // performance (since texture ops are one of the most commonly used in an app).
    struct Metadata {
        // AAType must be filled after initialization; ColorType is determined in finalize()
        Metadata(const GrSwizzle& swizzle, GrSamplerState::Filter filter,
                 GrQuadPerEdgeAA::Domain domain, GrTextureOp::Saturate saturate)
                : fSwizzle(swizzle)
                , fProxyCount(1)
                , fTotalQuadCount(1)
                , fFilter(static_cast<uint16_t>(filter))
                , fAAType(static_cast<uint16_t>(GrAAType::kNone))
                , fColorType(static_cast<uint16_t>(ColorType::kNone))
                , fDomain(static_cast<uint16_t>(domain))
                , fSaturate(static_cast<uint16_t>(saturate)) {}

        GrSwizzle fSwizzle; // sizeof(GrSwizzle) == uint16_t
        uint16_t fProxyCount;
        // This will be >= fProxyCount, since a proxy may be drawn multiple times
        uint16_t fTotalQuadCount;

        // These must be based on uint16_t to help MSVC's pack bitfields optimally
        uint16_t fFilter : 2;    // GrSamplerState::Filter
        uint16_t fAAType : 2;    // GrAAType
        uint16_t fColorType : 2; // GrQuadPerEdgeAA::ColorType
        uint16_t fDomain : 1;    // bool
        uint16_t fSaturate : 1;  // bool
        uint16_t fUnused : 8;    // # of bits left before Metadata exceeds 8 bytes

        // Typed accessors for the raw bitfield storage above.
        GrSamplerState::Filter filter() const {
            return static_cast<GrSamplerState::Filter>(fFilter);
        }
        GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
        ColorType colorType() const { return static_cast<ColorType>(fColorType); }
        Domain domain() const { return static_cast<Domain>(fDomain); }
        GrTextureOp::Saturate saturate() const {
            return static_cast<GrTextureOp::Saturate>(fSaturate);
        }

        // Each 2-bit field must be able to represent every enumerator of its enum.
        static_assert(GrSamplerState::kFilterCount <= 4);
        static_assert(kGrAATypeCount <= 4);
        static_assert(GrQuadPerEdgeAA::kColorTypeCount <= 4);
    };
    static_assert(sizeof(Metadata) == 8);
Michael Ludwigadb12e72019-12-04 16:19:18 -0500375
    // This descriptor is used in both onPrePrepareDraws and onPrepareDraws.
    //
    // In the onPrePrepareDraws case it is allocated in the creation-time opData
    // arena. Both allocateCommon and allocatePrePrepareOnly are called and they also allocate
    // their memory in the creation-time opData arena.
    //
    // In the onPrepareDraws case this descriptor is created on the stack and only
    // allocateCommon is called. In this case the common memory fields are allocated
    // in the flush-time arena (i.e., as part of the flushState).
    struct PrePreparedDesc {
        VertexSpec fVertexSpec;
        int fNumProxies = 0;
        int fNumTotalQuads = 0;
        GrPipeline::DynamicStateArrays* fDynamicStateArrays = nullptr;
        GrPipeline::FixedDynamicState* fFixedDynamicState = nullptr;

        // This member variable is only used by 'onPrePrepareDraws'. The prior five are also
        // used by 'onPrepareDraws'
        char* fVertices = nullptr;

        // How big should 'fVertices' be to hold all the vertex data?
        size_t totalSizeInBytes() const {
            return fNumTotalQuads * fVertexSpec.verticesPerQuad() * fVertexSpec.vertexSize();
        }

        // Total number of vertices across every quad in the op.
        int totalNumVertices() const {
            return fNumTotalQuads * fVertexSpec.verticesPerQuad();
        }

        // Helper to fill in the fFixedDynamicState and fDynamicStateArrays. If there is more
        // than one mesh/proxy they are stored in fDynamicStateArrays but if there is only one
        // it is stored in fFixedDynamicState.
        void setMeshProxy(int index, GrSurfaceProxy* proxy) {
            SkASSERT(index < fNumProxies);

            if (fDynamicStateArrays) {
                SkASSERT(fDynamicStateArrays->fPrimitiveProcessorTextures);
                SkASSERT(fNumProxies > 1);

                fDynamicStateArrays->fPrimitiveProcessorTextures[index] = proxy;
            } else {
                SkASSERT(fFixedDynamicState);
                SkASSERT(fNumProxies == 1);

                fFixedDynamicState->fPrimitiveProcessorTextures[index] = proxy;
            }
        }

        // Allocate the fields required in both onPrePrepareDraws and onPrepareDraws
        void allocateCommon(SkArenaAlloc* arena, const GrAppliedClip* clip) {
            // We'll use a dynamic state array for the GP textures when there are multiple ops.
            // Otherwise, we use fixed dynamic state to specify the single op's proxy.
            if (fNumProxies > 1) {
                fDynamicStateArrays = Target::AllocDynamicStateArrays(arena, fNumProxies, 1, false);
                fFixedDynamicState = Target::MakeFixedDynamicState(arena, clip, 0);
            } else {
                fFixedDynamicState = Target::MakeFixedDynamicState(arena, clip, 1);
            }
        }

        // Allocate the fields only needed by onPrePrepareDraws
        void allocatePrePrepareOnly(SkArenaAlloc* arena) {
            fVertices = arena->makeArrayDefault<char>(this->totalSizeInBytes());
        }

    };
442
    // dstQuad should be the geometry transformed by the view matrix. If domainRect
    // is not null it will be used to apply the strict src rect constraint.
    TextureOp(GrSurfaceProxyView proxyView,
              sk_sp<GrColorSpaceXform> textureColorSpaceXform,
              GrSamplerState::Filter filter,
              const SkPMColor4f& color,
              GrTextureOp::Saturate saturate,
              GrAAType aaType,
              GrQuadAAFlags aaFlags,
              const GrQuad& dstQuad,
              const GrQuad& srcQuad,
              const SkRect* domainRect)
            : INHERITED(ClassID())
            , fQuads(1, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fPrePreparedDesc(nullptr)
            , fMetadata(proxyView.swizzle(), filter, Domain(!!domainRect), saturate) {

        // Clean up disparities between the overall aa type and edge configuration and apply
        // optimizations based on the rect and matrix when appropriate
        GrQuadUtils::ResolveAAType(aaType, aaFlags, dstQuad, &aaType, &aaFlags);
        fMetadata.fAAType = static_cast<uint16_t>(aaType);

        // We expect our caller to have already caught this optimization.
        SkASSERT(!domainRect ||
                 !domainRect->contains(proxyView.proxy()->backingStoreBoundsRect()));

        // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
        // If we don't have (or determined we don't need) coverage AA then we can skip using a
        // domain.
        if (domainRect && filter == GrSamplerState::Filter::kNearest &&
            aaType != GrAAType::kCoverage) {
            domainRect = nullptr;
            fMetadata.fDomain = static_cast<uint16_t>(Domain::kNo);
        }

        // Normalize src coordinates and the domain (if set); must happen after the possible
        // domain drop above so a dropped domain yields the "no constraint" large rect.
        NormalizationParams params = proxy_normalization_params(proxyView.proxy(),
                                                                proxyView.origin());
        GrQuad normalizedSrcQuad = srcQuad;
        normalize_src_quad(params, &normalizedSrcQuad);
        SkRect domain = normalize_domain(filter, params, domainRect);

        fQuads.append(dstQuad, {color, domain, aaFlags}, &normalizedSrcQuad);
        // Single-quad op: exactly one ViewCountPair, taking ownership of the proxy.
        fViewCountPairs[0] = {proxyView.detachProxy(), 1};

        this->setBounds(dstQuad.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
                        IsHairline::kNo);
    }
Robert Phillipsbbd459d2019-10-29 14:40:03 -0400492
Michael Ludwigadb12e72019-12-04 16:19:18 -0500493 TextureOp(GrRenderTargetContext::TextureSetEntry set[],
Brian Salomonf19f9ca2019-09-18 15:54:26 -0400494 int cnt,
Michael Ludwig379e4962019-12-06 13:21:26 -0500495 int proxyRunCnt,
Brian Salomonf19f9ca2019-09-18 15:54:26 -0400496 GrSamplerState::Filter filter,
497 GrTextureOp::Saturate saturate,
498 GrAAType aaType,
499 SkCanvas::SrcRectConstraint constraint,
500 const SkMatrix& viewMatrix,
Brian Salomond003d222018-11-26 13:25:05 -0500501 sk_sp<GrColorSpaceXform> textureColorSpaceXform)
Brian Salomond7065e72018-10-12 11:42:02 -0400502 : INHERITED(ClassID())
Michael Ludwigf339dfe2019-06-27 10:41:28 -0400503 , fQuads(cnt, true /* includes locals */)
Brian Salomond7065e72018-10-12 11:42:02 -0400504 , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
Robert Phillips32803ff2019-10-23 08:26:08 -0400505 , fPrePreparedDesc(nullptr)
Michael Ludwigadb12e72019-12-04 16:19:18 -0500506 , fMetadata(set[0].fProxyView.swizzle(), GrSamplerState::Filter::kNearest,
507 Domain::kNo, saturate) {
508 // Update counts to reflect the batch op
Michael Ludwig379e4962019-12-06 13:21:26 -0500509 fMetadata.fProxyCount = SkToUInt(proxyRunCnt);
Michael Ludwigadb12e72019-12-04 16:19:18 -0500510 fMetadata.fTotalQuadCount = SkToUInt(cnt);
511
Brian Salomond7065e72018-10-12 11:42:02 -0400512 SkRect bounds = SkRectPriv::MakeLargestInverted();
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500513
514 GrAAType netAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
Michael Ludwig31ba7182019-04-03 10:38:06 -0400515 Domain netDomain = Domain::kNo;
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500516 GrSamplerState::Filter netFilter = GrSamplerState::Filter::kNearest;
517
518 // Net domain and filter quality are being determined simultaneously while iterating through
519 // the entry set. When filter changes to bilerp, all prior normalized domains in the
520 // GrQuadBuffer must be updated to reflect the 1/2px inset required. All quads appended
521 // afterwards will properly take that into account.
522 int correctDomainUpToIndex = 0;
Michael Ludwig379e4962019-12-06 13:21:26 -0500523 const GrSurfaceProxy* curProxy = nullptr;
524 // 'q' is the index in 'set' and fQuadBuffer; 'p' is the index in fViewCountPairs and only
525 // increases when set[q]'s proxy changes.
526 unsigned p = 0;
527 for (unsigned q = 0; q < fMetadata.fTotalQuadCount; ++q) {
528 if (q == 0) {
Greg Daniel549325c2019-10-30 16:19:20 -0400529 // We do not placement new the first ViewCountPair since that one is allocated and
530 // initialized as part of the GrTextureOp creation.
Michael Ludwig379e4962019-12-06 13:21:26 -0500531 fViewCountPairs[0].fProxy = set[0].fProxyView.detachProxy();
532 fViewCountPairs[0].fQuadCnt = 0;
533 curProxy = fViewCountPairs[0].fProxy.get();
534 } else if (set[q].fProxyView.proxy() != curProxy) {
Greg Daniel549325c2019-10-30 16:19:20 -0400535 // We must placement new the ViewCountPairs here so that the sk_sps in the
536 // GrSurfaceProxyView get initialized properly.
Michael Ludwig379e4962019-12-06 13:21:26 -0500537 new(&fViewCountPairs[++p])ViewCountPair({set[q].fProxyView.detachProxy(), 0});
Michael Ludwigadb12e72019-12-04 16:19:18 -0500538
Michael Ludwig379e4962019-12-06 13:21:26 -0500539 curProxy = fViewCountPairs[p].fProxy.get();
540 SkASSERT(curProxy->backendFormat().textureType() ==
541 fViewCountPairs[0].fProxy->backendFormat().textureType());
542 SkASSERT(fMetadata.fSwizzle == set[q].fProxyView.swizzle());
543 SkASSERT(curProxy->config() == fViewCountPairs[0].fProxy->config());
544 } // else another quad referencing the same proxy
Michael Ludwigce62dec2019-02-19 11:48:46 -0500545
Michael Ludwig7ae2ab52019-03-05 16:00:20 -0500546 SkMatrix ctm = viewMatrix;
Michael Ludwig379e4962019-12-06 13:21:26 -0500547 if (set[q].fPreViewMatrix) {
548 ctm.preConcat(*set[q].fPreViewMatrix);
Michael Ludwig7ae2ab52019-03-05 16:00:20 -0500549 }
550
Michael Ludwigf339dfe2019-06-27 10:41:28 -0400551 // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
552 // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
553 GrQuad quad, srcQuad;
Michael Ludwig379e4962019-12-06 13:21:26 -0500554 if (set[q].fDstClipQuad) {
555 quad = GrQuad::MakeFromSkQuad(set[q].fDstClipQuad, ctm);
Michael Ludwigf339dfe2019-06-27 10:41:28 -0400556
557 SkPoint srcPts[4];
Michael Ludwig379e4962019-12-06 13:21:26 -0500558 GrMapRectPoints(set[q].fDstRect, set[q].fSrcRect, set[q].fDstClipQuad, srcPts, 4);
Michael Ludwigf339dfe2019-06-27 10:41:28 -0400559 srcQuad = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
560 } else {
Michael Ludwig379e4962019-12-06 13:21:26 -0500561 quad = GrQuad::MakeFromRect(set[q].fDstRect, ctm);
562 srcQuad = GrQuad(set[q].fSrcRect);
Michael Ludwigf339dfe2019-06-27 10:41:28 -0400563 }
Michael Ludwigce62dec2019-02-19 11:48:46 -0500564
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500565 // Before normalizing the source coordinates, determine if bilerp is actually needed
566 if (netFilter != filter && filter_has_effect(srcQuad, quad)) {
567 // The only way netFilter != filter is if bilerp is requested and we haven't yet
568 // found a quad that requires bilerp (so net is still nearest).
569 SkASSERT(netFilter == GrSamplerState::Filter::kNearest &&
570 filter == GrSamplerState::Filter::kBilerp);
571 netFilter = GrSamplerState::Filter::kBilerp;
Michael Ludwig379e4962019-12-06 13:21:26 -0500572 // All quads index < q with domains were calculated as if there was no filtering,
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500573 // which is no longer true.
Michael Ludwig379e4962019-12-06 13:21:26 -0500574 correctDomainUpToIndex = q;
Michael Ludwig22429f92019-06-27 10:44:48 -0400575 }
576
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500577 // Normalize the src quads and apply origin
Michael Ludwigadb12e72019-12-04 16:19:18 -0500578 NormalizationParams proxyParams = proxy_normalization_params(
Michael Ludwig379e4962019-12-06 13:21:26 -0500579 curProxy, set[q].fProxyView.origin());
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500580 normalize_src_quad(proxyParams, &srcQuad);
581
582 // Update overall bounds of the op as the union of all quads
Michael Ludwig41f395d2019-05-23 13:59:45 -0400583 bounds.joinPossiblyEmptyRect(quad.bounds());
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500584
585 // Determine the AA type for the quad, then merge with net AA type
Michael Ludwig6bee7762018-10-19 09:50:36 -0400586 GrQuadAAFlags aaFlags;
Michael Ludwig6bee7762018-10-19 09:50:36 -0400587 GrAAType aaForQuad;
Michael Ludwig379e4962019-12-06 13:21:26 -0500588 GrQuadUtils::ResolveAAType(aaType, set[q].fAAFlags, quad, &aaForQuad, &aaFlags);
Michael Ludwig6bee7762018-10-19 09:50:36 -0400589 // Resolve sets aaForQuad to aaType or None, there is never a change between aa methods
590 SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500591 if (netAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
592 netAAType = aaType;
Brian Salomond7065e72018-10-12 11:42:02 -0400593 }
Michael Ludwigf339dfe2019-06-27 10:41:28 -0400594
595 // Calculate metadata for the entry
596 const SkRect* domainForQuad = nullptr;
Michael Ludwig31ba7182019-04-03 10:38:06 -0400597 if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
598 // Check (briefly) if the strict constraint is needed for this set entry
Michael Ludwig379e4962019-12-06 13:21:26 -0500599 if (!set[q].fSrcRect.contains(curProxy->backingStoreBoundsRect()) &&
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500600 (netFilter == GrSamplerState::Filter::kBilerp ||
601 aaForQuad == GrAAType::kCoverage)) {
Michael Ludwig31ba7182019-04-03 10:38:06 -0400602 // Can't rely on hardware clamping and the draw will access outer texels
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500603 // for AA and/or bilerp. Unlike filter quality, this op still has per-quad
604 // control over AA so that can check aaForQuad, not netAAType.
Michael Ludwig31ba7182019-04-03 10:38:06 -0400605 netDomain = Domain::kYes;
Michael Ludwig379e4962019-12-06 13:21:26 -0500606 domainForQuad = &set[q].fSrcRect;
Michael Ludwig31ba7182019-04-03 10:38:06 -0400607 }
608 }
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500609
Michael Ludwig379e4962019-12-06 13:21:26 -0500610 // Always append a quad, it just may refer back to a prior ViewCountPair
611 // (this frequently happens when Chrome draws 9-patches).
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500612 SkRect domain = normalize_domain(filter, proxyParams, domainForQuad);
Michael Ludwig379e4962019-12-06 13:21:26 -0500613 float alpha = SkTPin(set[q].fAlpha, 0.f, 1.f);
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500614 fQuads.append(quad, {{alpha, alpha, alpha, alpha}, domain, aaFlags}, &srcQuad);
Michael Ludwig379e4962019-12-06 13:21:26 -0500615 fViewCountPairs[p].fQuadCnt++;
Brian Salomond7065e72018-10-12 11:42:02 -0400616 }
Michael Ludwig379e4962019-12-06 13:21:26 -0500617 // The # of proxy switches should match what was provided (-1 because we incremented p
618 // when a new proxy was encountered).
619 SkASSERT(p == fMetadata.fProxyCount - 1);
620 SkASSERT(fQuads.count() == fMetadata.fTotalQuadCount);
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500621
622 // All the quads have been recorded, but some domains need to be fixed
623 if (netDomain == Domain::kYes && correctDomainUpToIndex > 0) {
Michael Ludwig379e4962019-12-06 13:21:26 -0500624 int p = 0; // for fViewCountPairs
625 int q = 0; // for set/fQuads
626 int netVCt = 0;
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500627 auto iter = fQuads.metadata();
Michael Ludwig379e4962019-12-06 13:21:26 -0500628 while(q < correctDomainUpToIndex && iter.next()) {
Michael Ludwigadb12e72019-12-04 16:19:18 -0500629 NormalizationParams proxyParams = proxy_normalization_params(
Michael Ludwig379e4962019-12-06 13:21:26 -0500630 fViewCountPairs[p].fProxy.get(), set[q].fProxyView.origin());
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500631 correct_domain_for_bilerp(proxyParams, &(iter->fDomainRect));
Michael Ludwig379e4962019-12-06 13:21:26 -0500632 q++;
633 if (q - netVCt >= fViewCountPairs[p].fQuadCnt) {
634 // Advance to the next view count pair
635 netVCt += fViewCountPairs[p].fQuadCnt;
636 p++;
637 }
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500638 }
Brian Salomon0087c832018-10-15 14:48:20 -0400639 }
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500640
Michael Ludwig4384f042019-12-05 10:30:35 -0500641 fMetadata.fAAType = static_cast<uint16_t>(netAAType);
642 fMetadata.fFilter = static_cast<uint16_t>(netFilter);
643 fMetadata.fDomain = static_cast<uint16_t>(netDomain);
Brian Salomon34169692017-08-28 15:32:01 -0400644
Michael Ludwig119ac6d2019-11-21 09:26:46 -0500645 this->setBounds(bounds, HasAABloat(netAAType == GrAAType::kCoverage), IsHairline::kNo);
Brian Salomon17031a72018-05-22 14:14:07 -0400646 }
647
    // Records (at DDL-record time) everything that can be computed before flush: the
    // characterization of the whole op chain, the clip/state allocations, and the vertex
    // data itself. Mesh creation is deferred to onPrepareDraws. All allocations here come
    // from the recording context's record-time arena, not the flush-time arena.
    void onPrePrepareDraws(GrRecordingContext* context,
                           const GrSurfaceProxyView* dstView,
                           GrAppliedClip* clip,
                           const GrXferProcessor::DstProxyView& dstProxyView) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);

        SkDEBUGCODE(this->validate();)
        // An op is only ever pre-prepared once.
        SkASSERT(!fPrePreparedDesc);

        SkArenaAlloc* arena = context->priv().recordTimeAllocator();

        fPrePreparedDesc = arena->make<PrePreparedDesc>();

        // Fill in the vertex spec, proxy/quad counts, etc. for the whole chain.
        this->characterize(fPrePreparedDesc);

        fPrePreparedDesc->allocateCommon(arena, clip);

        fPrePreparedDesc->allocatePrePrepareOnly(arena);

        // At this juncture we only fill in the vertex data and state arrays. Filling in of
        // the meshes is left until onPrepareDraws (hence the null 'meshes' argument).
        SkAssertResult(FillInData(*context->priv().caps(), this, fPrePreparedDesc,
                                  fPrePreparedDesc->fVertices, nullptr, 0, nullptr, nullptr));
    }
Robert Phillipsc5a2c752019-10-24 13:11:45 -0400672
    // Walks every op in the chain rooted at 'texOp' and, per ViewCountPair:
    //  - if 'pVertexData' is non-null, tessellates that pair's quads into it and records
    //    the pair's proxy in 'desc' (the pre-prepare and non-pre-prepared paths);
    //  - if 'meshes' is non-null, configures the corresponding GrMesh (the flush-time path).
    // Either pointer may be null so the function can be used for just one of the two phases.
    // Always returns true; callers assert the result.
    static bool FillInData(const GrCaps& caps, TextureOp* texOp, PrePreparedDesc* desc,
                           char* pVertexData, GrMesh* meshes, int absBufferOffset,
                           sk_sp<const GrBuffer> vertexBuffer,
                           sk_sp<const GrBuffer> indexBuffer) {
        int totQuadsSeen = 0;
        SkDEBUGCODE(int totVerticesSeen = 0;)
        SkDEBUGCODE(const size_t vertexSize = desc->fVertexSpec.vertexSize());

        GrQuadPerEdgeAA::Tessellator tessellator(desc->fVertexSpec, pVertexData);
        int meshIndex = 0;
        for (const auto& op : ChainRange<TextureOp>(texOp)) {
            auto iter = op.fQuads.iterator();
            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
                SkDEBUGCODE(int meshVertexCnt = quadCnt * desc->fVertexSpec.verticesPerQuad());
                SkASSERT(meshIndex < desc->fNumProxies);

                if (pVertexData) {
                    // Emit this pair's quads (device quad, local quad, and per-quad
                    // color/domain/AA metadata) into the vertex buffer.
                    for (int i = 0; i < quadCnt && iter.next(); ++i) {
                        SkASSERT(iter.isLocalValid());
                        const ColorDomainAndAA& info = iter.metadata();
                        tessellator.append(iter.deviceQuad(), iter.localQuad(),
                                           info.fColor, info.fDomainRect, info.aaFlags());
                    }
                    desc->setMeshProxy(meshIndex, op.fViewCountPairs[p].fProxy.get());

                    // The tessellator's write cursor must have advanced exactly one mesh's
                    // worth of vertices.
                    SkASSERT((totVerticesSeen + meshVertexCnt) * vertexSize
                             == (size_t)(tessellator.vertices() - pVertexData));
                }

                if (meshes) {
                    GrQuadPerEdgeAA::ConfigureMesh(caps, &(meshes[meshIndex]), desc->fVertexSpec,
                                                   totQuadsSeen, quadCnt, desc->totalNumVertices(),
                                                   vertexBuffer, indexBuffer, absBufferOffset);
                }

                ++meshIndex;

                totQuadsSeen += quadCnt;
                SkDEBUGCODE(totVerticesSeen += meshVertexCnt);
                SkASSERT(totQuadsSeen * desc->fVertexSpec.verticesPerQuad() == totVerticesSeen);
            }

            // If quad counts per proxy were calculated correctly, the entire iterator
            // should have been consumed.
            SkASSERT(!pVertexData || !iter.next());
        }

        SkASSERT(!pVertexData ||
                 (desc->totalSizeInBytes() == (size_t)(tessellator.vertices() - pVertexData)));
        SkASSERT(meshIndex == desc->fNumProxies);
        SkASSERT(totQuadsSeen == desc->fNumTotalQuads);
        SkASSERT(totVerticesSeen == desc->totalNumVertices());
        return true;
    }
728
#ifdef SK_DEBUG
    // Debug-only consistency check over the whole op chain: every chained op must share
    // this op's swizzle and texture type, have a compatible AA type, and the per-pair quad
    // counts must sum to the chain-wide total.
    void validate() const override {
        // NOTE: Since this is debug-only code, we use the virtual asTextureProxy()
        auto textureType = fViewCountPairs[0].fProxy->asTextureProxy()->textureType();
        GrAAType aaType = fMetadata.aaType();

        int quadCount = 0;
        for (const auto& op : ChainRange<TextureOp>(this)) {
            SkASSERT(op.fMetadata.fSwizzle == fMetadata.fSwizzle);

            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                auto* proxy = op.fViewCountPairs[p].fProxy->asTextureProxy();
                quadCount += op.fViewCountPairs[p].fQuadCnt;
                SkASSERT(proxy);
                SkASSERT(proxy->textureType() == textureType);
            }

            // Each individual op must be a single aaType. kCoverage and kNone ops can chain
            // together but kMSAA ones do not.
            if (aaType == GrAAType::kCoverage || aaType == GrAAType::kNone) {
                SkASSERT(op.fMetadata.aaType() == GrAAType::kCoverage ||
                         op.fMetadata.aaType() == GrAAType::kNone);
            } else {
                SkASSERT(aaType == GrAAType::kMSAA && op.fMetadata.aaType() == GrAAType::kMSAA);
            }
        }

        SkASSERT(quadCount == this->numChainedQuads());
    }
#endif
759
#if GR_TEST_UTILS
    // Test-only accessor: number of quads in this op alone (chained ops not included).
    int numQuads() const final { return this->totNumQuads(); }
#endif
763
    // Computes the aggregate description of the entire op chain: the widest device/local
    // quad types, whether any op needs a shader domain, the net color type and AA type,
    // total proxy/quad counts, and the resulting VertexSpec. The results are written into
    // 'desc' and must match what FillInData will later produce.
    void characterize(PrePreparedDesc* desc) const {
        GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
        ColorType colorType = ColorType::kNone;
        GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
        Domain domain = Domain::kNo;
        GrAAType overallAAType = fMetadata.aaType();

        desc->fNumProxies = 0;
        desc->fNumTotalQuads = 0;
        int maxQuadsPerMesh = 0;

        for (const auto& op : ChainRange<TextureOp>(this)) {
            // Quad types are ordered by generality; take the max over the chain.
            if (op.fQuads.deviceQuadType() > quadType) {
                quadType = op.fQuads.deviceQuadType();
            }
            if (op.fQuads.localQuadType() > srcQuadType) {
                srcQuadType = op.fQuads.localQuadType();
            }
            if (op.fMetadata.domain() == Domain::kYes) {
                domain = Domain::kYes;
            }
            colorType = SkTMax(colorType, op.fMetadata.colorType());
            desc->fNumProxies += op.fMetadata.fProxyCount;

            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                maxQuadsPerMesh = SkTMax(op.fViewCountPairs[p].fQuadCnt, maxQuadsPerMesh);
            }
            desc->fNumTotalQuads += op.totNumQuads();

            // Any coverage-AA op upgrades the whole chain to coverage AA.
            if (op.fMetadata.aaType() == GrAAType::kCoverage) {
                overallAAType = GrAAType::kCoverage;
            }
        }

        SkASSERT(desc->fNumTotalQuads == this->numChainedQuads());

        SkASSERT(!CombinedQuadCountWillOverflow(overallAAType, false, desc->fNumTotalQuads));

        // The index buffer option depends on the largest single mesh, not the chain total.
        auto indexBufferOption = GrQuadPerEdgeAA::CalcIndexBufferOption(overallAAType,
                                                                        maxQuadsPerMesh);

        desc->fVertexSpec = VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true,
                                       domain, overallAAType, /* alpha as coverage */ true,
                                       indexBufferOption);

        SkASSERT(desc->fNumTotalQuads <= GrQuadPerEdgeAA::QuadLimit(indexBufferOption));
    }
Michael Ludwigc182b942018-11-16 10:27:51 -0500811
Robert Phillipsbbd459d2019-10-29 14:40:03 -0400812 int totNumQuads() const {
813#ifdef SK_DEBUG
814 int tmp = 0;
Michael Ludwigadb12e72019-12-04 16:19:18 -0500815 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
Greg Daniel549325c2019-10-30 16:19:20 -0400816 tmp += fViewCountPairs[p].fQuadCnt;
Robert Phillipsbbd459d2019-10-29 14:40:03 -0400817 }
Michael Ludwigadb12e72019-12-04 16:19:18 -0500818 SkASSERT(tmp == fMetadata.fTotalQuadCount);
Robert Phillipsbbd459d2019-10-29 14:40:03 -0400819#endif
820
Michael Ludwigadb12e72019-12-04 16:19:18 -0500821 return fMetadata.fTotalQuadCount;
Robert Phillipsbbd459d2019-10-29 14:40:03 -0400822 }
823
824 int numChainedQuads() const {
825 int numChainedQuads = this->totNumQuads();
826
827 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
828 numChainedQuads += ((const TextureOp*)tmp)->totNumQuads();
829 }
830
831 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
832 numChainedQuads += ((const TextureOp*)tmp)->totNumQuads();
833 }
834
835 return numChainedQuads;
836 }
837
    // Flush-time preparation: allocates GPU vertex/index buffers and meshes, fills them
    // (either by copying pre-prepared vertex data or by tessellating now), and records the
    // draw with a textured geometry processor.
    // onPrePrepareDraws may or may not have been called at this point
    void onPrepareDraws(Target* target) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);

        SkDEBUGCODE(this->validate();)

        PrePreparedDesc desc;

        if (fPrePreparedDesc) {
            // DDL path: reuse the record-time characterization and vertex data.
            desc = *fPrePreparedDesc;
        } else {
            SkArenaAlloc* arena = target->allocator();

            this->characterize(&desc);
            desc.allocateCommon(arena, target->appliedClip());

            SkASSERT(!desc.fVertices);
        }

        size_t vertexSize = desc.fVertexSpec.vertexSize();

        sk_sp<const GrBuffer> vbuffer;
        int vertexOffsetInBuffer = 0;

        void* vdata = target->makeVertexSpace(vertexSize, desc.totalNumVertices(),
                                              &vbuffer, &vertexOffsetInBuffer);
        if (!vdata) {
            // Allocation failure: skip the draw rather than crash.
            SkDebugf("Could not allocate vertices\n");
            return;
        }

        sk_sp<const GrBuffer> indexBuffer;
        if (desc.fVertexSpec.needsIndexBuffer()) {
            indexBuffer = GrQuadPerEdgeAA::GetIndexBuffer(target,
                                                          desc.fVertexSpec.indexBufferOption());
            if (!indexBuffer) {
                SkDebugf("Could not allocate indices\n");
                return;
            }
        }

        // Note: this allocation is always in the flush-time arena (i.e., the flushState)
        GrMesh* meshes = target->allocMeshes(desc.fNumProxies);

        bool result;
        if (fPrePreparedDesc) {
            memcpy(vdata, desc.fVertices, desc.totalSizeInBytes());
            // The above memcpy filled in the vertex data - just call FillInData to fill in the
            // mesh data
            result = FillInData(target->caps(), this, &desc, nullptr, meshes, vertexOffsetInBuffer,
                                std::move(vbuffer), std::move(indexBuffer));
        } else {
            // Fills in both vertex data and mesh data
            result = FillInData(target->caps(), this, &desc, (char*) vdata, meshes,
                                vertexOffsetInBuffer, std::move(vbuffer), std::move(indexBuffer));
        }

        if (!result) {
            return;
        }

        GrGeometryProcessor* gp;

        {
            // All proxies in the chain share a backend format (asserted in the constructor),
            // so the first pair's format is representative.
            const GrBackendFormat& backendFormat =
                    fViewCountPairs[0].fProxy->backendFormat();

            GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
                                                         fMetadata.filter());

            gp = GrQuadPerEdgeAA::MakeTexturedProcessor(target->allocator(),
                    desc.fVertexSpec, *target->caps().shaderCaps(), backendFormat,
                    samplerState, fMetadata.fSwizzle, std::move(fTextureColorSpaceXform),
                    fMetadata.saturate());

            SkASSERT(vertexSize == gp->vertexStride());
        }

        target->recordDraw(gp, meshes, desc.fNumProxies,
                           desc.fFixedDynamicState, desc.fDynamicStateArrays,
                           desc.fVertexSpec.primitiveType());
    }
920
921 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
Michael Ludwigadb12e72019-12-04 16:19:18 -0500922 auto pipelineFlags = (GrAAType::kMSAA == fMetadata.aaType())
Chris Daltonbaa1b352019-04-03 12:03:00 -0600923 ? GrPipeline::InputFlags::kHWAntialias
924 : GrPipeline::InputFlags::kNone;
Robert Phillips3968fcb2019-12-05 16:40:31 -0500925
926 auto pipeline = GrSimpleMeshDrawOpHelper::CreatePipeline(flushState,
927 GrProcessorSet::MakeEmptySet(),
928 pipelineFlags);
929
930 flushState->executeDrawsAndUploadsForMeshDrawOp(this, chainBounds, pipeline);
Brian Salomon34169692017-08-28 15:32:01 -0400931 }
932
    // Decides whether 'this' and 't' (another TextureOp) can be merged into one op,
    // chained for dynamic-state texture switching, or must stay separate. On a merge,
    // folds the other op's quads and metadata into this op.
    CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
        const auto* that = t->cast<TextureOp>();

        if (fPrePreparedDesc || that->fPrePreparedDesc) {
            // This should never happen (since only DDL recorded ops should be prePrepared)
            // but, in any case, we should never combine ops that have been prePrepared
            return CombineResult::kCannotCombine;
        }

        if (fMetadata.domain() != that->fMetadata.domain()) {
            // It is technically possible to combine operations across domain modes, but performance
            // testing suggests it's better to make more draw calls where some take advantage of
            // the more optimal shader path without coordinate clamping.
            return CombineResult::kCannotCombine;
        }
        if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
                                       that->fTextureColorSpaceXform.get())) {
            return CombineResult::kCannotCombine;
        }

        bool upgradeToCoverageAAOnMerge = false;
        if (fMetadata.aaType() != that->fMetadata.aaType()) {
            if (!CanUpgradeAAOnMerge(fMetadata.aaType(), that->fMetadata.aaType())) {
                return CombineResult::kCannotCombine;
            }
            upgradeToCoverageAAOnMerge = true;
        }

        // Merging must not push the combined chain past the index-buffer quad limit.
        if (CombinedQuadCountWillOverflow(fMetadata.aaType(), upgradeToCoverageAAOnMerge,
                                          this->numChainedQuads() + that->numChainedQuads())) {
            return CombineResult::kCannotCombine;
        }

        if (fMetadata.saturate() != that->fMetadata.saturate()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.filter() != that->fMetadata.filter()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.fSwizzle != that->fMetadata.fSwizzle) {
            return CombineResult::kCannotCombine;
        }
        const auto* thisProxy = fViewCountPairs[0].fProxy.get();
        const auto* thatProxy = that->fViewCountPairs[0].fProxy.get();
        if (fMetadata.fProxyCount > 1 || that->fMetadata.fProxyCount > 1 ||
            thisProxy != thatProxy) {
            // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
            if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
                caps.dynamicStateArrayGeometryProcessorTextureSupport()) {
                return CombineResult::kMayChain;
            }
            return CombineResult::kCannotCombine;
        }

        // Merge: widen this op's metadata to cover both ops.
        fMetadata.fDomain |= that->fMetadata.fDomain;
        fMetadata.fColorType = SkTMax(fMetadata.fColorType, that->fMetadata.fColorType);
        if (upgradeToCoverageAAOnMerge) {
            fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
        }

        // Concatenate quad lists together
        fQuads.concat(that->fQuads);
        fViewCountPairs[0].fQuadCnt += that->fQuads.count();
        fMetadata.fTotalQuadCount += that->fQuads.count();

        return CombineResult::kMerged;
    }
1001
    // Device/local coordinates plus per-quad color, domain rect, and AA flags for every
    // quad in this op.
    GrQuadBuffer<ColorDomainAndAA> fQuads;
    // Color space transform applied to sampled texture colors (handed off to the
    // geometry processor in onPrepareDraws).
    sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
    // 'fPrePreparedDesc' is only filled in when this op has been prePrepared. In that case,
    // it - and the matching dynamic and fixed state - have been allocated in the opPOD arena
    // not in the FlushState arena.
    PrePreparedDesc* fPrePreparedDesc;
    // All configurable state of TextureOp is packed into one field to minimize the op's size.
    // Historically, increasing the size of TextureOp has caused surprising perf regressions, so
    // consider/measure changes with care.
    Metadata fMetadata;

    // This field must go last. When allocating this op, we will allocate extra space to hold
    // additional ViewCountPairs immediately after the op's allocation so we can treat this
    // as an fProxyCnt-length array.
    ViewCountPair fViewCountPairs[1];

    typedef GrMeshDrawOp INHERITED;
1019};
1020
1021} // anonymous namespace
1022
#if GR_TEST_UTILS
// Test-only: exposes the anonymous-namespace TextureOp's class ID.
uint32_t GrTextureOp::ClassID() {
    return TextureOp::ClassID();
}
#endif
Brian Salomon34169692017-08-28 15:32:01 -04001028
Robert Phillipse837e612019-11-15 11:02:50 -05001029std::unique_ptr<GrDrawOp> GrTextureOp::Make(GrRecordingContext* context,
1030 GrSurfaceProxyView proxyView,
Brian Salomonfc118442019-11-22 19:09:27 -05001031 SkAlphaType alphaType,
Robert Phillipse837e612019-11-15 11:02:50 -05001032 sk_sp<GrColorSpaceXform> textureXform,
1033 GrSamplerState::Filter filter,
1034 const SkPMColor4f& color,
1035 Saturate saturate,
1036 SkBlendMode blendMode,
1037 GrAAType aaType,
1038 GrQuadAAFlags aaFlags,
1039 const GrQuad& deviceQuad,
1040 const GrQuad& localQuad,
1041 const SkRect* domain) {
Michael Ludwig22429f92019-06-27 10:44:48 -04001042 // Apply optimizations that are valid whether or not using GrTextureOp or GrFillRectOp
Michael Ludwigfcdd0612019-11-25 08:34:31 -05001043 if (domain && domain->contains(proxyView.proxy()->backingStoreBoundsRect())) {
Michael Ludwig22429f92019-06-27 10:44:48 -04001044 // No need for a shader-based domain if hardware clamping achieves the same effect
1045 domain = nullptr;
1046 }
1047
1048 if (filter != GrSamplerState::Filter::kNearest && !filter_has_effect(localQuad, deviceQuad)) {
1049 filter = GrSamplerState::Filter::kNearest;
1050 }
1051
1052 if (blendMode == SkBlendMode::kSrcOver) {
Greg Daniel549325c2019-10-30 16:19:20 -04001053 return TextureOp::Make(context, std::move(proxyView), std::move(textureXform), filter,
1054 color, saturate, aaType, aaFlags, deviceQuad, localQuad, domain);
Michael Ludwig22429f92019-06-27 10:44:48 -04001055 } else {
1056 // Emulate complex blending using GrFillRectOp
1057 GrPaint paint;
1058 paint.setColor4f(color);
1059 paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
1060
Michael Ludwig8fa469d2019-11-25 16:08:44 -05001061 GrSurfaceProxy* proxy = proxyView.proxy();
Michael Ludwig22429f92019-06-27 10:44:48 -04001062 std::unique_ptr<GrFragmentProcessor> fp;
Brian Salomon7eabfe82019-12-02 14:20:20 -05001063 fp = GrSimpleTextureEffect::Make(sk_ref_sp(proxy), alphaType, SkMatrix::I(), filter);
Michael Ludwig22429f92019-06-27 10:44:48 -04001064 if (domain) {
Michael Ludwig119ac6d2019-11-21 09:26:46 -05001065 // Update domain to match what GrTextureOp would do for bilerp, but don't do any
1066 // normalization since GrTextureDomainEffect handles that and the origin.
1067 SkRect correctedDomain = normalize_domain(filter, {1.f, 1.f, 0.f}, domain);
Brian Salomon7eabfe82019-12-02 14:20:20 -05001068 fp = GrDomainEffect::Make(std::move(fp), correctedDomain, GrTextureDomain::kClamp_Mode,
1069 filter);
Michael Ludwig22429f92019-06-27 10:44:48 -04001070 }
1071 fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
1072 paint.addColorFragmentProcessor(std::move(fp));
Brian Salomonf19f9ca2019-09-18 15:54:26 -04001073 if (saturate == GrTextureOp::Saturate::kYes) {
1074 paint.addColorFragmentProcessor(GrSaturateProcessor::Make());
1075 }
Michael Ludwig22429f92019-06-27 10:44:48 -04001076
1077 return GrFillRectOp::Make(context, std::move(paint), aaType, aaFlags,
1078 deviceQuad, localQuad);
1079 }
1080}
1081
Robert Phillipse837e612019-11-15 11:02:50 -05001082// A helper class that assists in breaking up bulk API quad draws into manageable chunks.
1083class GrTextureOp::BatchSizeLimiter {
1084public:
1085 BatchSizeLimiter(GrRenderTargetContext* rtc,
1086 const GrClip& clip,
1087 GrRecordingContext* context,
1088 int numEntries,
1089 GrSamplerState::Filter filter,
1090 GrTextureOp::Saturate saturate,
1091 SkCanvas::SrcRectConstraint constraint,
1092 const SkMatrix& viewMatrix,
1093 sk_sp<GrColorSpaceXform> textureColorSpaceXform)
1094 : fRTC(rtc)
1095 , fClip(clip)
1096 , fContext(context)
1097 , fFilter(filter)
1098 , fSaturate(saturate)
1099 , fConstraint(constraint)
1100 , fViewMatrix(viewMatrix)
1101 , fTextureColorSpaceXform(textureColorSpaceXform)
1102 , fNumLeft(numEntries) {
1103 }
Brian Salomon34169692017-08-28 15:32:01 -04001104
Michael Ludwigadb12e72019-12-04 16:19:18 -05001105 void createOp(GrRenderTargetContext::TextureSetEntry set[],
Robert Phillipse837e612019-11-15 11:02:50 -05001106 int clumpSize,
1107 GrAAType aaType) {
Michael Ludwig379e4962019-12-06 13:21:26 -05001108 int clumpProxyCount = proxy_run_count(&set[fNumClumped], clumpSize);
Robert Phillipse837e612019-11-15 11:02:50 -05001109 std::unique_ptr<GrDrawOp> op = TextureOp::Make(fContext, &set[fNumClumped], clumpSize,
Michael Ludwig379e4962019-12-06 13:21:26 -05001110 clumpProxyCount, fFilter, fSaturate, aaType,
Robert Phillipse837e612019-11-15 11:02:50 -05001111 fConstraint, fViewMatrix,
1112 fTextureColorSpaceXform);
1113 fRTC->addDrawOp(fClip, std::move(op));
1114
1115 fNumLeft -= clumpSize;
1116 fNumClumped += clumpSize;
1117 }
1118
1119 int numLeft() const { return fNumLeft; }
1120 int baseIndex() const { return fNumClumped; }
1121
1122private:
1123 GrRenderTargetContext* fRTC;
1124 const GrClip& fClip;
1125 GrRecordingContext* fContext;
1126 GrSamplerState::Filter fFilter;
1127 GrTextureOp::Saturate fSaturate;
1128 SkCanvas::SrcRectConstraint fConstraint;
1129 const SkMatrix& fViewMatrix;
1130 sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
1131
1132 int fNumLeft;
1133 int fNumClumped = 0; // also the offset for the start of the next clump
1134};
1135
1136// Greedily clump quad draws together until the index buffer limit is exceeded.
// Breaks a bulk set of 'cnt' textured-quad entries into one or more draw ops and adds them to
// 'rtc'. 'proxyRunCnt' must equal proxy_run_count(set, cnt) (asserted below). Entries are
// clumped so no op exceeds the index-buffer quad limits; AA quads and non-AA quads use
// different limits, so a mixed set may be split at AA transitions.
void GrTextureOp::AddTextureSetOps(GrRenderTargetContext* rtc,
                                   const GrClip& clip,
                                   GrRecordingContext* context,
                                   GrRenderTargetContext::TextureSetEntry set[],
                                   int cnt,
                                   int proxyRunCnt,
                                   GrSamplerState::Filter filter,
                                   Saturate saturate,
                                   SkBlendMode blendMode,
                                   GrAAType aaType,
                                   SkCanvas::SrcRectConstraint constraint,
                                   const SkMatrix& viewMatrix,
                                   sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
    // Ensure that the index buffer limits are lower than the proxy and quad count limits of
    // the op's metadata so we don't need to worry about overflow.
    SkASSERT(GrResourceProvider::MaxNumNonAAQuads() <= UINT16_MAX &&
             GrResourceProvider::MaxNumAAQuads() <= UINT16_MAX);
    SkASSERT(proxy_run_count(set, cnt) == proxyRunCnt);

    // First check if we can support batches as a single op
    if (blendMode != SkBlendMode::kSrcOver ||
        !context->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
        // Append each entry as its own op; these may still be GrTextureOps if the blend mode is
        // src-over but the backend doesn't support dynamic state changes. Otherwise Make()
        // automatically creates the appropriate GrFillRectOp to emulate GrTextureOp.
        SkMatrix ctm;
        for (int i = 0; i < cnt; ++i) {
            float alpha = set[i].fAlpha;
            // Each entry's optional pre-view matrix is folded into the shared view matrix.
            ctm = viewMatrix;
            if (set[i].fPreViewMatrix) {
                ctm.preConcat(*set[i].fPreViewMatrix);
            }

            GrQuad quad, srcQuad;
            if (set[i].fDstClipQuad) {
                // A dst clip quad replaces the dst rect; derive matching local coords by
                // mapping the clip points from dst space into src space.
                quad = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);

                SkPoint srcPts[4];
                GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
                srcQuad = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
            } else {
                quad = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
                srcQuad = GrQuad(set[i].fSrcRect);
            }

            // Strict constraint limits sampling to the entry's src rect.
            const SkRect* domain = constraint == SkCanvas::kStrict_SrcRectConstraint
                    ? &set[i].fSrcRect : nullptr;

            auto op = Make(context, set[i].fProxyView, set[i].fSrcAlphaType, textureColorSpaceXform,
                           filter, {alpha, alpha, alpha, alpha}, saturate, blendMode, aaType,
                           set[i].fAAFlags, quad, srcQuad, domain);
            rtc->addDrawOp(clip, std::move(op));
        }
        return;
    }

    // Second check if we can always just make a single op and avoid the extra iteration
    // needed to clump things together.
    if (cnt <= SkTMin(GrResourceProvider::MaxNumNonAAQuads(),
                      GrResourceProvider::MaxNumAAQuads())) {
        auto op = TextureOp::Make(context, set, cnt, proxyRunCnt, filter, saturate, aaType,
                                  constraint, viewMatrix, std::move(textureColorSpaceXform));
        rtc->addDrawOp(clip, std::move(op));
        return;
    }

    BatchSizeLimiter state(rtc, clip, context, cnt, filter, saturate, constraint, viewMatrix,
                           std::move(textureColorSpaceXform));

    // kNone and kMSAA never get altered
    if (aaType == GrAAType::kNone || aaType == GrAAType::kMSAA) {
        // Clump these into series of MaxNumNonAAQuads-sized GrTextureOps
        while (state.numLeft() > 0) {
            int clumpSize = SkTMin(state.numLeft(), GrResourceProvider::MaxNumNonAAQuads());

            state.createOp(set, clumpSize, aaType);
        }
    } else {
        // kCoverage can be downgraded to kNone. Note that the following is conservative. kCoverage
        // can also get downgraded to kNone if all the quads are on integer coordinates and
        // axis-aligned.
        SkASSERT(aaType == GrAAType::kCoverage);

        while (state.numLeft() > 0) {
            // 'runningAA' tracks the AA requirement of the clump accumulated so far; it starts
            // at kNone and is promoted to kCoverage the first time an AA entry is seen.
            GrAAType runningAA = GrAAType::kNone;
            bool clumped = false;

            for (int i = 0; i < state.numLeft(); ++i) {
                int absIndex = state.baseIndex() + i;

                if (set[absIndex].fAAFlags != GrQuadAAFlags::kNone) {

                    if (i >= GrResourceProvider::MaxNumAAQuads()) {
                        // Here we either need to boost the AA type to kCoverage, but doing so with
                        // all the accumulated quads would overflow, or we have a set of AA quads
                        // that has just gotten too large. In either case, calve off the existing
                        // quads as their own TextureOp.
                        state.createOp(
                            set,
                            runningAA == GrAAType::kNone ? i : GrResourceProvider::MaxNumAAQuads(),
                            runningAA); // maybe downgrading AA here
                        clumped = true;
                        break;
                    }

                    runningAA = GrAAType::kCoverage;
                } else if (runningAA == GrAAType::kNone) {

                    if (i >= GrResourceProvider::MaxNumNonAAQuads()) {
                        // Here we've found a consistent batch of non-AA quads that has gotten too
                        // large. Calve it off as its own GrTextureOp.
                        state.createOp(set, GrResourceProvider::MaxNumNonAAQuads(),
                                       GrAAType::kNone); // definitely downgrading AA here
                        clumped = true;
                        break;
                    }
                }
            }

            if (!clumped) {
                // We ran through the above loop w/o hitting a limit. Spit out this last clump of
                // quads and call it a day.
                state.createOp(set, state.numLeft(), runningAA); // maybe downgrading AA here
            }
        }
    }
}
Robert Phillipsae01f622019-11-13 15:56:31 +00001264
Brian Salomon34169692017-08-28 15:32:01 -04001265#if GR_TEST_UTILS
Mike Kleinc0bd9f92019-04-23 12:05:21 -05001266#include "include/private/GrRecordingContext.h"
1267#include "src/gpu/GrProxyProvider.h"
1268#include "src/gpu/GrRecordingContextPriv.h"
Brian Salomon34169692017-08-28 15:32:01 -04001269
// Fuzz-test factory: builds a TextureOp with randomized proxy dimensions, geometry, filter,
// AA state, and color, always using src-over blending. 'random', 'context', and 'numSamples'
// are supplied by the GR_DRAW_OP_TEST_DEFINE macro.
GR_DRAW_OP_TEST_DEFINE(TextureOp) {
    GrSurfaceDesc desc;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    // Random dimensions in [10, 99].
    desc.fHeight = random->nextULessThan(90) + 10;
    desc.fWidth = random->nextULessThan(90) + 10;
    auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
    GrMipMapped mipMapped = random->nextBool() ? GrMipMapped::kYes : GrMipMapped::kNo;
    // Approx fit is only exercised for non-mipmapped proxies.
    SkBackingFit fit = SkBackingFit::kExact;
    if (mipMapped == GrMipMapped::kNo) {
        fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
    }
    const GrBackendFormat format =
            context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
                                                            GrRenderable::kNo);

    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
            format, desc, GrRenderable::kNo, 1, origin, mipMapped, fit, SkBudgeted::kNo,
            GrProtected::kNo, GrInternalSurfaceFlags::kNone);

    SkRect rect = GrTest::TestRect(random);
    // Src rect edges are chosen so left<=right and top<=bottom while allowing values that
    // extend past the proxy bounds.
    SkRect srcRect;
    srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
    srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
    srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
    srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
    SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
    SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
    GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
            static_cast<uint32_t>(GrSamplerState::Filter::kMipMap) + 1);
    // Re-roll until the filter is compatible with a non-mipmapped proxy.
    while (mipMapped == GrMipMapped::kNo && filter == GrSamplerState::Filter::kMipMap) {
        filter = (GrSamplerState::Filter)random->nextULessThan(
                static_cast<uint32_t>(GrSamplerState::Filter::kMipMap) + 1);
    }
    auto texXform = GrTest::TestColorXform(random);
    GrAAType aaType = GrAAType::kNone;
    if (random->nextBool()) {
        // MSAA only when the target actually has multiple samples.
        aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
    }
    // Each edge's AA flag is chosen independently.
    GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
    bool useDomain = random->nextBool();
    auto saturate = random->nextBool() ? GrTextureOp::Saturate::kYes : GrTextureOp::Saturate::kNo;
    GrSurfaceProxyView proxyView(
            std::move(proxy), origin,
            context->priv().caps()->getTextureSwizzle(format, GrColorType::kRGBA_8888));
    // Any alpha type except kUnknown.
    auto alphaType = static_cast<SkAlphaType>(
            random->nextRangeU(kUnknown_SkAlphaType + 1, kLastEnum_SkAlphaType));

    return GrTextureOp::Make(context, std::move(proxyView), alphaType, std::move(texXform), filter,
                             color, saturate, SkBlendMode::kSrcOver, aaType, aaFlags,
                             GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect),
                             useDomain ? &srcRect : nullptr);
}
1327
1328#endif