blob: 8ba5a8b940bb24c40484d3301101c2431e81a40b [file] [log] [blame]
/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "src/gpu/GrClipStack.h"
9
10#include "include/core/SkMatrix.h"
11#include "src/core/SkRRectPriv.h"
12#include "src/core/SkRectPriv.h"
13#include "src/core/SkTaskGroup.h"
14#include "src/gpu/GrClip.h"
Adlai Hollera0693042020-10-14 11:23:11 -040015#include "src/gpu/GrDirectContextPriv.h"
Michael Ludwiga195d102020-09-15 14:51:52 -040016#include "src/gpu/GrProxyProvider.h"
17#include "src/gpu/GrRecordingContextPriv.h"
Michael Ludwiga195d102020-09-15 14:51:52 -040018#include "src/gpu/GrSWMaskHelper.h"
19#include "src/gpu/GrStencilMaskHelper.h"
20#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
21#include "src/gpu/effects/GrBlendFragmentProcessor.h"
22#include "src/gpu/effects/GrConvexPolyEffect.h"
23#include "src/gpu/effects/GrRRectEffect.h"
24#include "src/gpu/effects/GrTextureEffect.h"
25#include "src/gpu/effects/generated/GrAARectEffect.h"
26#include "src/gpu/effects/generated/GrDeviceSpaceEffect.h"
27#include "src/gpu/geometry/GrQuadUtils.h"
28
29namespace {
30
// Identifies which operands of a combined clip pair (A op B) remain relevant after
// simplification, where 'op' is intersect or difference.
enum class ClipGeometry {
    kEmpty,
    kAOnly,
    kBOnly,
    kBoth
};
39
40// A and B can be Element, SaveRecord, or Draw. Supported combinations are, order not mattering,
41// (Element, Element), (Element, SaveRecord), (Element, Draw), and (SaveRecord, Draw).
42template<typename A, typename B>
43static ClipGeometry get_clip_geometry(const A& a, const B& b) {
44 // NOTE: SkIRect::Intersects() returns false when two rectangles touch at an edge (so the result
45 // is empty). This behavior is desired for the following clip effect policies.
46 if (a.op() == SkClipOp::kIntersect) {
47 if (b.op() == SkClipOp::kIntersect) {
48 // Intersect (A) + Intersect (B)
49 if (!SkIRect::Intersects(a.outerBounds(), b.outerBounds())) {
50 // Regions with non-zero coverage are disjoint, so intersection = empty
51 return ClipGeometry::kEmpty;
52 } else if (b.contains(a)) {
53 // B's full coverage region contains entirety of A, so intersection = A
54 return ClipGeometry::kAOnly;
55 } else if (a.contains(b)) {
56 // A's full coverage region contains entirety of B, so intersection = B
57 return ClipGeometry::kBOnly;
58 } else {
59 // The shapes intersect in some non-trivial manner
60 return ClipGeometry::kBoth;
61 }
62 } else {
63 SkASSERT(b.op() == SkClipOp::kDifference);
64 // Intersect (A) + Difference (B)
65 if (!SkIRect::Intersects(a.outerBounds(), b.outerBounds())) {
66 // A only intersects B's full coverage region, so intersection = A
67 return ClipGeometry::kAOnly;
68 } else if (b.contains(a)) {
69 // B's zero coverage region completely contains A, so intersection = empty
70 return ClipGeometry::kEmpty;
71 } else {
72 // Intersection cannot be simplified. Note that the combination of a intersect
73 // and difference op in this order cannot produce kBOnly
74 return ClipGeometry::kBoth;
75 }
76 }
77 } else {
78 SkASSERT(a.op() == SkClipOp::kDifference);
79 if (b.op() == SkClipOp::kIntersect) {
80 // Difference (A) + Intersect (B) - the mirror of Intersect(A) + Difference(B),
81 // but combining is commutative so this is equivalent barring naming.
82 if (!SkIRect::Intersects(b.outerBounds(), a.outerBounds())) {
83 // B only intersects A's full coverage region, so intersection = B
84 return ClipGeometry::kBOnly;
85 } else if (a.contains(b)) {
86 // A's zero coverage region completely contains B, so intersection = empty
87 return ClipGeometry::kEmpty;
88 } else {
89 // Cannot be simplified
90 return ClipGeometry::kBoth;
91 }
92 } else {
93 SkASSERT(b.op() == SkClipOp::kDifference);
94 // Difference (A) + Difference (B)
95 if (a.contains(b)) {
96 // A's zero coverage region contains B, so B doesn't remove any extra
97 // coverage from their intersection.
98 return ClipGeometry::kAOnly;
99 } else if (b.contains(a)) {
100 // Mirror of the above case, intersection = B instead
101 return ClipGeometry::kBOnly;
102 } else {
103 // Intersection of the two differences cannot be simplified. Note that for
104 // this op combination it is not possible to produce kEmpty.
105 return ClipGeometry::kBoth;
106 }
107 }
108 }
109}
110
111// a.contains(b) where a's local space is defined by 'aToDevice', and b's possibly separate local
112// space is defined by 'bToDevice'. 'a' and 'b' geometry are provided in their local spaces.
113// Automatically takes into account if the anti-aliasing policies differ. When the policies match,
114// we assume that coverage AA or GPU's non-AA rasterization will apply to A and B equivalently, so
115// we can compare the original shapes. When the modes are mixed, we outset B in device space first.
116static bool shape_contains_rect(
117 const GrShape& a, const SkMatrix& aToDevice, const SkMatrix& deviceToA,
118 const SkRect& b, const SkMatrix& bToDevice, bool mixedAAMode) {
119 if (!a.convex()) {
120 return false;
121 }
122
123 if (!mixedAAMode && aToDevice == bToDevice) {
124 // A and B are in the same coordinate space, so don't bother mapping
125 return a.conservativeContains(b);
Michael Ludwigd30e9ef2020-09-28 12:03:01 -0400126 } else if (bToDevice.isIdentity() && aToDevice.preservesAxisAlignment()) {
Michael Ludwig84a008f2020-09-18 15:30:55 -0400127 // Optimize the common case of draws (B, with identity matrix) and axis-aligned shapes,
128 // instead of checking the four corners separately.
129 SkRect bInA = b;
130 if (mixedAAMode) {
131 bInA.outset(0.5f, 0.5f);
132 }
133 SkAssertResult(deviceToA.mapRect(&bInA));
134 return a.conservativeContains(bInA);
Michael Ludwiga195d102020-09-15 14:51:52 -0400135 }
136
137 // Test each corner for contains; since a is convex, if all 4 corners of b's bounds are
138 // contained, then the entirety of b is within a.
139 GrQuad deviceQuad = GrQuad::MakeFromRect(b, bToDevice);
140 if (any(deviceQuad.w4f() < SkPathPriv::kW0PlaneDistance)) {
141 // Something in B actually projects behind the W = 0 plane and would be clipped to infinity,
142 // so it's extremely unlikely that A can contain B.
143 return false;
144 }
145 if (mixedAAMode) {
146 // Outset it so its edges are 1/2px out, giving us a buffer to avoid cases where a non-AA
147 // clip or draw would snap outside an aa element.
148 GrQuadUtils::Outset({0.5f, 0.5f, 0.5f, 0.5f}, &deviceQuad);
149 }
150
151 for (int i = 0; i < 4; ++i) {
152 SkPoint cornerInA = deviceQuad.point(i);
153 deviceToA.mapPoints(&cornerInA, 1);
154 if (!a.conservativeContains(cornerInA)) {
155 return false;
156 }
157 }
158
159 return true;
160}
161
162static SkIRect subtract(const SkIRect& a, const SkIRect& b, bool exact) {
163 SkIRect diff;
164 if (SkRectPriv::Subtract(a, b, &diff) || !exact) {
165 // Either A-B is exactly the rectangle stored in diff, or we don't need an exact answer
166 // and can settle for the subrect of A excluded from B (which is also 'diff')
167 return diff;
168 } else {
169 // For our purposes, we want the original A when A-B cannot be exactly represented
170 return a;
171 }
172}
173
174static GrClipEdgeType get_clip_edge_type(SkClipOp op, GrAA aa) {
175 if (op == SkClipOp::kIntersect) {
176 return aa == GrAA::kYes ? GrClipEdgeType::kFillAA : GrClipEdgeType::kFillBW;
177 } else {
178 return aa == GrAA::kYes ? GrClipEdgeType::kInverseFillAA : GrClipEdgeType::kInverseFillBW;
179 }
180}
181
// Reserved clip-stack generation IDs. These are compile-time constants, so declare them
// constexpr instead of mutable file-scope statics (the originals were modifiable globals).
static constexpr uint32_t kInvalidGenID  = 0;
static constexpr uint32_t kEmptyGenID    = 1;
static constexpr uint32_t kWideOpenGenID = 2;

// Returns a process-unique generation ID for a clip state. Thread-safe via a relaxed atomic
// counter; the loop skips the reserved IDs again if the 32-bit counter ever wraps around.
static uint32_t next_gen_id() {
    // 0-2 are reserved for invalid, empty & wide-open
    static constexpr uint32_t kFirstUnreservedGenID = 3;
    static std::atomic<uint32_t> nextID{kFirstUnreservedGenID};

    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id < kFirstUnreservedGenID);
    return id;
}
197
// Functions for rendering / applying clip shapes in various ways
// The general strategy is:
//  - Represent the clip element as an analytic FP that tests sk_FragCoord vs. its device shape
//  - Render the clip element to the stencil, if stencil is allowed and supports the AA, and the
//    size of the element indicates stenciling will be worth it, vs. making a mask.
//  - Try to put the individual element into a clip atlas, which is then sampled during the draw
//  - Render the element into a SW mask and upload it. If possible, the SW rasterization happens
//    in parallel.
// All clip mask textures use this origin so device-space coords map directly to texel coords.
static constexpr GrSurfaceOrigin kMaskOrigin = kTopLeft_GrSurfaceOrigin;
207
208static GrFPResult analytic_clip_fp(const GrClipStack::Element& e,
209 const GrShaderCaps& caps,
210 std::unique_ptr<GrFragmentProcessor> fp) {
211 // All analytic clip shape FPs need to be in device space
212 GrClipEdgeType edgeType = get_clip_edge_type(e.fOp, e.fAA);
213 if (e.fLocalToDevice.isIdentity()) {
214 if (e.fShape.isRect()) {
215 return GrFPSuccess(GrAARectEffect::Make(std::move(fp), edgeType, e.fShape.rect()));
216 } else if (e.fShape.isRRect()) {
217 return GrRRectEffect::Make(std::move(fp), edgeType, e.fShape.rrect(), caps);
218 }
219 }
220
221 // A convex hull can be transformed into device space (this will handle rect shapes with a
222 // non-identity transform).
223 if (e.fShape.segmentMask() == SkPath::kLine_SegmentMask && e.fShape.convex()) {
224 SkPath devicePath;
225 e.fShape.asPath(&devicePath);
226 devicePath.transform(e.fLocalToDevice);
227 return GrConvexPolyEffect::Make(std::move(fp), edgeType, devicePath);
228 }
229
230 return GrFPFailure(std::move(fp));
231}
232
233// TODO: Currently this only works with CCPR because CCPR owns and manages the clip atlas. The
234// high-level concept should be generalized to support any path renderer going into a shared atlas.
235static std::unique_ptr<GrFragmentProcessor> clip_atlas_fp(GrCoverageCountingPathRenderer* ccpr,
236 uint32_t opsTaskID,
237 const SkIRect& bounds,
238 const GrClipStack::Element& e,
239 SkPath* devicePath,
240 const GrCaps& caps,
241 std::unique_ptr<GrFragmentProcessor> fp) {
242 // TODO: Currently the atlas manages device-space paths, so we have to transform by the ctm.
243 // In the future, the atlas manager should see the local path and the ctm so that it can
244 // cache across integer-only translations (internally, it already does this, just not exposed).
245 if (devicePath->isEmpty()) {
246 e.fShape.asPath(devicePath);
247 devicePath->transform(e.fLocalToDevice);
248 SkASSERT(!devicePath->isEmpty());
249 }
250
251 SkASSERT(!devicePath->isInverseFillType());
252 if (e.fOp == SkClipOp::kIntersect) {
253 return ccpr->makeClipProcessor(std::move(fp), opsTaskID, *devicePath, bounds, caps);
254 } else {
255 // Use kDstOut to convert the non-inverted mask alpha into (1-alpha), so the atlas only
256 // ever renders non-inverse filled paths.
257 // - When the input FP is null, this turns into "(1-sample(ccpr, 1).a) * input"
258 // - When not null, it works out to
259 // (1-sample(ccpr, input.rgb1).a) * sample(fp, input.rgb1) * input.a
260 // - Since clips only care about the alpha channel, these are both equivalent to the
261 // desired product of (1-ccpr) * fp * input.a.
262 return GrBlendFragmentProcessor::Make(
263 ccpr->makeClipProcessor(nullptr, opsTaskID, *devicePath, bounds, caps), // src
264 std::move(fp), // dst
265 SkBlendMode::kDstOut);
266 }
267}
268
269static void draw_to_sw_mask(GrSWMaskHelper* helper, const GrClipStack::Element& e, bool clearMask) {
270 // If the first element to draw is an intersect, we clear to 0 and will draw it directly with
271 // coverage 1 (subsequent intersect elements will be inverse-filled and draw 0 outside).
272 // If the first element to draw is a difference, we clear to 1, and in all cases we draw the
273 // difference element directly with coverage 0.
274 if (clearMask) {
275 helper->clear(e.fOp == SkClipOp::kIntersect ? 0x00 : 0xFF);
276 }
277
278 uint8_t alpha;
279 bool invert;
280 if (e.fOp == SkClipOp::kIntersect) {
281 // Intersect modifies pixels outside of its geometry. If this isn't the first op, we
282 // draw the inverse-filled shape with 0 coverage to erase everything outside the element
283 // But if we are the first element, we can draw directly with coverage 1 since we
284 // cleared to 0.
285 if (clearMask) {
286 alpha = 0xFF;
287 invert = false;
288 } else {
289 alpha = 0x00;
290 invert = true;
291 }
292 } else {
293 // For difference ops, can always just subtract the shape directly by drawing 0 coverage
294 SkASSERT(e.fOp == SkClipOp::kDifference);
295 alpha = 0x00;
296 invert = false;
297 }
298
299 // Draw the shape; based on how we've initialized the buffer and chosen alpha+invert,
300 // every element is drawn with the kReplace_Op
301 if (invert) {
302 // Must invert the path
303 SkASSERT(!e.fShape.inverted());
304 // TODO: this is an extra copy effectively, just so we can toggle inversion; would be
305 // better perhaps to just call a drawPath() since we know it'll use path rendering w/
306 // the inverse fill type.
307 GrShape inverted(e.fShape);
308 inverted.setInverted(true);
309 helper->drawShape(inverted, e.fLocalToDevice, SkRegion::kReplace_Op, e.fAA, alpha);
310 } else {
311 helper->drawShape(e.fShape, e.fLocalToDevice, SkRegion::kReplace_Op, e.fAA, alpha);
312 }
313}
314
315static GrSurfaceProxyView render_sw_mask(GrRecordingContext* context, const SkIRect& bounds,
316 const GrClipStack::Element** elements, int count) {
317 SkASSERT(count > 0);
318
Adlai Holler990a0d82021-02-05 13:40:51 -0500319 SkTArray<GrClipStack::Element> data(count);
320 for (int i = 0; i < count; ++i) {
321 data.push_back(*(elements[i]));
Michael Ludwiga195d102020-09-15 14:51:52 -0400322 }
Adlai Holler990a0d82021-02-05 13:40:51 -0500323 return GrSWMaskHelper::MakeTexture(bounds,
324 context,
325 SkBackingFit::kApprox,
326 [data{std::move(data)}](GrSWMaskHelper* helper) {
327 TRACE_EVENT0("skia.gpu", "SW Clip Mask Render");
328 for (int i = 0; i < data.count(); ++i) {
329 draw_to_sw_mask(helper, data[i], i == 0);
Michael Ludwiga195d102020-09-15 14:51:52 -0400330 }
Adlai Holler990a0d82021-02-05 13:40:51 -0500331 });
Michael Ludwiga195d102020-09-15 14:51:52 -0400332}
333
Brian Salomoneebe7352020-12-09 16:37:04 -0500334static void render_stencil_mask(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
Michael Ludwiga195d102020-09-15 14:51:52 -0400335 uint32_t genID, const SkIRect& bounds,
336 const GrClipStack::Element** elements, int count,
337 GrAppliedClip* out) {
338 GrStencilMaskHelper helper(context, rtc);
339 if (helper.init(bounds, genID, out->windowRectsState().windows(), 0)) {
340 // This follows the same logic as in draw_sw_mask
341 bool startInside = elements[0]->fOp == SkClipOp::kDifference;
342 helper.clear(startInside);
343 for (int i = 0; i < count; ++i) {
344 const GrClipStack::Element& e = *(elements[i]);
345 SkRegion::Op op;
346 if (e.fOp == SkClipOp::kIntersect) {
347 op = (i == 0) ? SkRegion::kReplace_Op : SkRegion::kIntersect_Op;
348 } else {
349 op = SkRegion::kDifference_Op;
350 }
351 helper.drawShape(e.fShape, e.fLocalToDevice, op, e.fAA);
352 }
353 helper.finish();
354 }
355 out->hardClip().addStencilClip(genID);
356}
357
358} // anonymous namespace
359
360class GrClipStack::Draw {
361public:
362 Draw(const SkRect& drawBounds, GrAA aa)
363 : fBounds(GrClip::GetPixelIBounds(drawBounds, aa, BoundsType::kExterior))
364 , fAA(aa) {
365 // Be slightly more forgiving on whether or not a draw is inside a clip element.
366 fOriginalBounds = drawBounds.makeInset(GrClip::kBoundsTolerance, GrClip::kBoundsTolerance);
367 if (fOriginalBounds.isEmpty()) {
368 fOriginalBounds = drawBounds;
369 }
370 }
371
372 // Common clip type interface
373 SkClipOp op() const { return SkClipOp::kIntersect; }
374 const SkIRect& outerBounds() const { return fBounds; }
375
376 // Draw does not have inner bounds so cannot contain anything.
377 bool contains(const RawElement& e) const { return false; }
378 bool contains(const SaveRecord& s) const { return false; }
379
380 bool applyDeviceBounds(const SkIRect& deviceBounds) {
381 return fBounds.intersect(deviceBounds);
382 }
383
384 const SkRect& bounds() const { return fOriginalBounds; }
385 GrAA aa() const { return fAA; }
386
387private:
388 SkRect fOriginalBounds;
389 SkIRect fBounds;
390 GrAA fAA;
391};
392
393///////////////////////////////////////////////////////////////////////////////
394// GrClipStack::Element
395
// Wraps a clip shape with empty (not-yet-computed) device bounds; simplify() fills them in.
GrClipStack::RawElement::RawElement(const SkMatrix& localToDevice, const GrShape& shape,
                                    GrAA aa, SkClipOp op)
        : Element{shape, localToDevice, op, aa}
        , fInnerBounds(SkIRect::MakeEmpty())
        , fOuterBounds(SkIRect::MakeEmpty())
        , fInvalidatedByIndex(-1) {
    if (!localToDevice.invert(&fDeviceToLocal)) {
        // If the transform can't be inverted, it means that two dimensions are collapsed to 0 or
        // 1 dimension, making the device-space geometry effectively empty.
        fShape.reset();
    }
}
408
// Deactivates this element, recording the current save record's first active element index so
// restoreValid() can undo the invalidation when that record is later popped.
void GrClipStack::RawElement::markInvalid(const SaveRecord& current) {
    SkASSERT(!this->isInvalid());
    fInvalidatedByIndex = current.firstActiveElementIndex();
}
413
// Re-activates the element if the save record that invalidated it (see markInvalid()) has been
// popped, i.e. the current record's first active index precedes the recorded one.
void GrClipStack::RawElement::restoreValid(const SaveRecord& current) {
    if (current.firstActiveElementIndex() < fInvalidatedByIndex) {
        fInvalidatedByIndex = -1;
    }
}
419
420bool GrClipStack::RawElement::contains(const Draw& d) const {
421 if (fInnerBounds.contains(d.outerBounds())) {
422 return true;
423 } else {
424 // If the draw is non-AA, use the already computed outer bounds so we don't need to use
425 // device-space outsetting inside shape_contains_rect.
426 SkRect queryBounds = d.aa() == GrAA::kYes ? d.bounds() : SkRect::Make(d.outerBounds());
427 return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
428 queryBounds, SkMatrix::I(), /* mixed-aa */ false);
429 }
430}
431
432bool GrClipStack::RawElement::contains(const SaveRecord& s) const {
433 if (fInnerBounds.contains(s.outerBounds())) {
434 return true;
435 } else {
436 // This is very similar to contains(Draw) but we just have outerBounds to work with.
437 SkRect queryBounds = SkRect::Make(s.outerBounds());
438 return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
439 queryBounds, SkMatrix::I(), /* mixed-aa */ false);
440 }
441}
442
443bool GrClipStack::RawElement::contains(const RawElement& e) const {
444 // This is similar to how RawElement checks containment for a Draw, except that both the tester
445 // and testee have a transform that needs to be considered.
446 if (fInnerBounds.contains(e.fOuterBounds)) {
447 return true;
448 }
449
450 bool mixedAA = fAA != e.fAA;
451 if (!mixedAA && fLocalToDevice == e.fLocalToDevice) {
452 // Test the shapes directly against each other, with a special check for a rrect+rrect
453 // containment (a intersect b == a implies b contains a) and paths (same gen ID, or same
454 // path for small paths means they contain each other).
455 static constexpr int kMaxPathComparePoints = 16;
456 if (fShape.isRRect() && e.fShape.isRRect()) {
457 return SkRRectPriv::ConservativeIntersect(fShape.rrect(), e.fShape.rrect())
458 == e.fShape.rrect();
459 } else if (fShape.isPath() && e.fShape.isPath()) {
460 return fShape.path().getGenerationID() == e.fShape.path().getGenerationID() ||
461 (fShape.path().getPoints(nullptr, 0) <= kMaxPathComparePoints &&
462 fShape.path() == e.fShape.path());
463 } // else fall through to shape_contains_rect
464 }
465
466 return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
467 e.fShape.bounds(), e.fLocalToDevice, mixedAA);
468
469}
470
// Canonicalizes the element against the device bounds: un-inverts the shape (toggling the op),
// simplifies the shape type, clamps it to the device, optionally upgrades to AA, and computes
// fOuterBounds/fInnerBounds. The shape is reset to empty whenever it cannot affect the device.
void GrClipStack::RawElement::simplify(const SkIRect& deviceBounds, bool forceAA) {
    // Make sure the shape is not inverted. An inverted shape is equivalent to a non-inverted shape
    // with the clip op toggled.
    if (fShape.inverted()) {
        fOp = fOp == SkClipOp::kIntersect ? SkClipOp::kDifference : SkClipOp::kIntersect;
        fShape.setInverted(false);
    }

    // Then simplify the base shape, if it becomes empty, no need to update the bounds
    fShape.simplify();
    SkASSERT(!fShape.inverted());
    if (fShape.isEmpty()) {
        return;
    }

    // Lines and points should have been turned into empty since we assume everything is filled
    SkASSERT(!fShape.isPoint() && !fShape.isLine());
    // Validity check, we have no public API to create an arc at the moment
    SkASSERT(!fShape.isArc());

    SkRect outer = fLocalToDevice.mapRect(fShape.bounds());
    if (!outer.intersect(SkRect::Make(deviceBounds))) {
        // A non-empty shape is offscreen, so treat it as empty
        fShape.reset();
        return;
    }

    // Except for axis-aligned clip rects, upgrade to AA when forced. We skip axis-aligned clip
    // rects because a non-AA axis aligned rect can always be set as just a scissor test or window
    // rect, avoiding an expensive stencil mask generation.
    if (forceAA && !(fShape.isRect() && fLocalToDevice.preservesAxisAlignment())) {
        fAA = GrAA::kYes;
    }

    // Except for non-AA axis-aligned rects, the outer bounds is the rounded-out device-space
    // mapped bounds of the shape.
    fOuterBounds = GrClip::GetPixelIBounds(outer, fAA, BoundsType::kExterior);

    if (fLocalToDevice.preservesAxisAlignment()) {
        if (fShape.isRect()) {
            // The actual geometry can be updated to the device-intersected bounds and we can
            // know the inner bounds
            fShape.rect() = outer;
            fLocalToDevice.setIdentity();
            fDeviceToLocal.setIdentity();

            if (fAA == GrAA::kNo && outer.width() >= 1.f && outer.height() >= 1.f) {
                // NOTE: Legacy behavior to avoid performance regressions. For non-aa axis-aligned
                // clip rects we always just round so that they can be scissor-only (avoiding the
                // uncertainty in how a GPU might actually round an edge on fractional coords).
                fOuterBounds = outer.round();
                fInnerBounds = fOuterBounds;
            } else {
                fInnerBounds = GrClip::GetPixelIBounds(outer, fAA, BoundsType::kInterior);
                SkASSERT(fOuterBounds.contains(fInnerBounds) || fInnerBounds.isEmpty());
            }
        } else if (fShape.isRRect()) {
            // Can't transform in place and must still check transform result since some very
            // ill-formed scale+translate matrices can cause invalid rrect radii.
            SkRRect src;
            if (fShape.rrect().transform(fLocalToDevice, &src)) {
                fShape.rrect() = src;
                fLocalToDevice.setIdentity();
                fDeviceToLocal.setIdentity();

                SkRect inner = SkRRectPriv::InnerBounds(fShape.rrect());
                fInnerBounds = GrClip::GetPixelIBounds(inner, fAA, BoundsType::kInterior);
                if (!fInnerBounds.intersect(deviceBounds)) {
                    fInnerBounds = SkIRect::MakeEmpty();
                }
            }
        }
    }

    if (fOuterBounds.isEmpty()) {
        // This can happen if we have non-AA shapes smaller than a pixel that do not cover a pixel
        // center. We could round out, but rasterization would still result in an empty clip.
        fShape.reset();
    }

    // Post-conditions on inner and outer bounds
    SkASSERT(fShape.isEmpty() || (!fOuterBounds.isEmpty() && deviceBounds.contains(fOuterBounds)));
    SkASSERT(fShape.isEmpty() || fInnerBounds.isEmpty() || fOuterBounds.contains(fInnerBounds));
}
555
// Attempts to fold 'other' into this element so only one needs to be kept. Returns true when
// this element now represents the combination (including when the combination was proven empty
// and this element was marked invalid); false when no merge was possible.
bool GrClipStack::RawElement::combine(const RawElement& other, const SaveRecord& current) {
    // To reduce the number of possibilities, only consider intersect+intersect. Difference and
    // mixed op cases could be analyzed to simplify one of the shapes, but that is a rare
    // occurrence and the math is much more complicated.
    if (other.fOp != SkClipOp::kIntersect || fOp != SkClipOp::kIntersect) {
        return false;
    }

    // At the moment, only rect+rect or rrect+rrect are supported (although rect+rrect is
    // treated as a degenerate case of rrect+rrect).
    bool shapeUpdated = false;
    if (fShape.isRect() && other.fShape.isRect()) {
        bool aaMatch = fAA == other.fAA;
        if (fLocalToDevice.isIdentity() && other.fLocalToDevice.isIdentity() && !aaMatch) {
            if (GrClip::IsPixelAligned(fShape.rect())) {
                // Our AA type doesn't really matter, take other's since its edges may not be
                // pixel aligned, so after intersection clip behavior should respect its aa type.
                fAA = other.fAA;
            } else if (!GrClip::IsPixelAligned(other.fShape.rect())) {
                // Neither shape is pixel aligned and AA types don't match so can't combine
                return false;
            }
            // Either we've updated this->fAA to actually match, or other->fAA doesn't matter so
            // this can be set to true. We just can't modify other to set it's aa to this->fAA.
            // But since 'this' becomes the combo of the two, other will be deleted so that's fine.
            aaMatch = true;
        }

        if (aaMatch && fLocalToDevice == other.fLocalToDevice) {
            if (!fShape.rect().intersect(other.fShape.rect())) {
                // By floating point, it turns out the combination should be empty
                this->fShape.reset();
                this->markInvalid(current);
                return true;
            }
            shapeUpdated = true;
        }
    } else if ((fShape.isRect() || fShape.isRRect()) &&
               (other.fShape.isRect() || other.fShape.isRRect())) {
        // No such pixel-aligned disregard for AA for round rects
        if (fAA == other.fAA && fLocalToDevice == other.fLocalToDevice) {
            // Treat rrect+rect intersections as rrect+rrect
            SkRRect a = fShape.isRect() ? SkRRect::MakeRect(fShape.rect()) : fShape.rrect();
            SkRRect b = other.fShape.isRect() ? SkRRect::MakeRect(other.fShape.rect())
                                              : other.fShape.rrect();

            SkRRect joined = SkRRectPriv::ConservativeIntersect(a, b);
            if (!joined.isEmpty()) {
                // Can reduce to a single element
                if (joined.isRect()) {
                    // And with a simplified type
                    fShape.setRect(joined.rect());
                } else {
                    fShape.setRRect(joined);
                }
                shapeUpdated = true;
            } else if (!a.getBounds().intersects(b.getBounds())) {
                // Like the rect+rect combination, the intersection is actually empty
                fShape.reset();
                this->markInvalid(current);
                return true;
            }
        }
    }

    if (shapeUpdated) {
        // This logic works under the assumption that both combined elements were intersect, so we
        // don't do the full bounds computations like in simplify().
        SkASSERT(fOp == SkClipOp::kIntersect && other.fOp == SkClipOp::kIntersect);
        SkAssertResult(fOuterBounds.intersect(other.fOuterBounds));
        if (!fInnerBounds.intersect(other.fInnerBounds)) {
            fInnerBounds = SkIRect::MakeEmpty();
        }
        return true;
    } else {
        return false;
    }
}
634
// Re-evaluates this element against a newly added element: either may be invalidated when the
// other subsumes it, both are invalidated when their combination is provably empty, or the new
// element may absorb this one via combine().
void GrClipStack::RawElement::updateForElement(RawElement* added, const SaveRecord& current) {
    if (this->isInvalid()) {
        // Already doesn't do anything, so skip this element
        return;
    }

    // 'A' refers to this element, 'B' refers to 'added'.
    switch (get_clip_geometry(*this, *added)) {
        case ClipGeometry::kEmpty:
            // Mark both elements as invalid to signal that the clip is fully empty
            this->markInvalid(current);
            added->markInvalid(current);
            break;

        case ClipGeometry::kAOnly:
            // This element already clips more than 'added', so mark 'added' is invalid to skip it
            added->markInvalid(current);
            break;

        case ClipGeometry::kBOnly:
            // 'added' clips more than this element, so mark this as invalid
            this->markInvalid(current);
            break;

        case ClipGeometry::kBoth:
            // Else the bounds checks think we need to keep both, but depending on the combination
            // of the ops and shape kinds, we may be able to do better.
            if (added->combine(*this, current)) {
                // 'added' now fully represents the combination of the two elements
                this->markInvalid(current);
            }
            break;
    }
}
669
// Maps this element's shape type, op, and transform to the coarse ClipState category that
// GrClipStack uses to pick an application strategy.
GrClipStack::ClipState GrClipStack::RawElement::clipType() const {
    // Map from the internal shape kind to the clip state enum
    switch (fShape.type()) {
        case GrShape::Type::kEmpty:
            return ClipState::kEmpty;

        case GrShape::Type::kRect:
            // Only a device-space intersect rect can become a plain scissor.
            return fOp == SkClipOp::kIntersect && fLocalToDevice.isIdentity()
                    ? ClipState::kDeviceRect : ClipState::kComplex;

        case GrShape::Type::kRRect:
            return fOp == SkClipOp::kIntersect && fLocalToDevice.isIdentity()
                    ? ClipState::kDeviceRRect : ClipState::kComplex;

        case GrShape::Type::kArc:
        case GrShape::Type::kLine:
        case GrShape::Type::kPoint:
            // These types should never become RawElements
            SkASSERT(false);
            [[fallthrough]];

        case GrShape::Type::kPath:
            return ClipState::kComplex;
    }
    SkUNREACHABLE;
}
696
697///////////////////////////////////////////////////////////////////////////////
698// GrClipStack::Mask
699
// Builds a unique resource-cache key for a clip mask covering 'drawBounds' under the clip state
// identified by the save record's generation ID.
GrClipStack::Mask::Mask(const SaveRecord& current, const SkIRect& drawBounds)
        : fBounds(drawBounds)
        , fGenID(current.genID()) {
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();

    // The gen ID should not be invalid, empty, or wide open, since those do not require masks
    SkASSERT(fGenID != kInvalidGenID && fGenID != kEmptyGenID && fGenID != kWideOpenGenID);

    GrUniqueKey::Builder builder(&fKey, kDomain, 3, "clip_mask");
    builder[0] = fGenID;
    // SkToS16 because image filters outset layers to a size indicated by the filter, which can
    // sometimes result in negative coordinates from device space.
    builder[1] = SkToS16(drawBounds.fLeft) | (SkToS16(drawBounds.fRight) << 16);
    builder[2] = SkToS16(drawBounds.fTop) | (SkToS16(drawBounds.fBottom) << 16);
    SkASSERT(fKey.isValid());

    // In debug builds, remember the creating record to validate reuse in appliesToDraw().
    SkDEBUGCODE(fOwner = &current;)
}
718
719bool GrClipStack::Mask::appliesToDraw(const SaveRecord& current, const SkIRect& drawBounds) const {
720 // For the same save record, a larger mask will have the same or more elements
721 // baked into it, so it can be reused to clip the smaller draw.
722 SkASSERT(fGenID != current.genID() || &current == fOwner);
723 return fGenID == current.genID() && fBounds.contains(drawBounds);
724}
725
726void GrClipStack::Mask::invalidate(GrProxyProvider* proxyProvider) {
727 SkASSERT(proxyProvider);
728 SkASSERT(fKey.isValid()); // Should only be invalidated once
729 proxyProvider->processInvalidUniqueKey(
730 fKey, nullptr, GrProxyProvider::InvalidateGPUResource::kYes);
731 fKey.reset();
732}
733
734///////////////////////////////////////////////////////////////////////////////
735// GrClipStack::SaveRecord
736
// Constructs the root save record: a wide-open clip whose inner and outer bounds both span the
// full device.
GrClipStack::SaveRecord::SaveRecord(const SkIRect& deviceBounds)
        : fInnerBounds(deviceBounds)
        , fOuterBounds(deviceBounds)
        , fShader(nullptr)
        , fStartingMaskIndex(0)
        , fStartingElementIndex(0)
        , fOldestValidIndex(0)
        , fDeferredSaveCount(0)
        , fStackOp(SkClipOp::kIntersect)
        , fState(ClipState::kWideOpen)
        , fGenID(kInvalidGenID) {}
748
// Constructs a save record stacked on top of 'prior', inheriting its clip state while taking
// ownership of elements/masks added at or after the given starting indices.
GrClipStack::SaveRecord::SaveRecord(const SaveRecord& prior,
                                    int startingMaskIndex,
                                    int startingElementIndex)
        : fInnerBounds(prior.fInnerBounds)
        , fOuterBounds(prior.fOuterBounds)
        , fShader(prior.fShader)
        , fStartingMaskIndex(startingMaskIndex)
        , fStartingElementIndex(startingElementIndex)
        , fOldestValidIndex(prior.fOldestValidIndex)
        , fDeferredSaveCount(0)
        , fStackOp(prior.fStackOp)
        , fState(prior.fState)
        , fGenID(kInvalidGenID) {
    // If the prior record never needed a mask, this one will insert into the same index
    // (that's okay since we'll remove it when this record is popped off the stack).
    SkASSERT(startingMaskIndex >= prior.fStartingMaskIndex);
    // The same goes for elements (the prior could have been wide open).
    SkASSERT(startingElementIndex >= prior.fStartingElementIndex);
}
768
769uint32_t GrClipStack::SaveRecord::genID() const {
770 if (fState == ClipState::kEmpty) {
771 return kEmptyGenID;
772 } else if (fState == ClipState::kWideOpen) {
773 return kWideOpenGenID;
774 } else {
775 // The gen ID shouldn't be empty or wide open, since they are reserved for the above
776 // if-cases. It may be kInvalid if the record hasn't had any elements added to it yet.
777 SkASSERT(fGenID != kEmptyGenID && fGenID != kWideOpenGenID);
778 return fGenID;
779 }
780}
781
782GrClipStack::ClipState GrClipStack::SaveRecord::state() const {
783 if (fShader && fState != ClipState::kEmpty) {
784 return ClipState::kComplex;
785 } else {
786 return fState;
787 }
788}
789
// Returns true if the record's inner bounds (the region known to have full coverage from every
// element) contain the draw's conservative outer bounds, i.e. the clip cannot affect the draw.
bool GrClipStack::SaveRecord::contains(const GrClipStack::Draw& draw) const {
    return fInnerBounds.contains(draw.outerBounds());
}
793
// Returns true if the record's inner bounds fully contain the element's outer bounds, meaning the
// element's geometry lies entirely within the area already known to be inside the clip.
bool GrClipStack::SaveRecord::contains(const GrClipStack::RawElement& element) const {
    return fInnerBounds.contains(element.outerBounds());
}
797
798void GrClipStack::SaveRecord::removeElements(RawElement::Stack* elements) {
799 while (elements->count() > fStartingElementIndex) {
800 elements->pop_back();
801 }
802}
803
804void GrClipStack::SaveRecord::restoreElements(RawElement::Stack* elements) {
805 // Presumably this SaveRecord is the new top of the stack, and so it owns the elements
806 // from its starting index to restoreCount - 1. Elements from the old save record have
807 // been destroyed already, so their indices would have been >= restoreCount, and any
808 // still-present element can be un-invalidated based on that.
809 int i = elements->count() - 1;
810 for (RawElement& e : elements->ritems()) {
811 if (i < fOldestValidIndex) {
812 break;
813 }
814 e.restoreValid(*this);
815 --i;
816 }
817}
818
819void GrClipStack::SaveRecord::invalidateMasks(GrProxyProvider* proxyProvider,
820 Mask::Stack* masks) {
821 // Must explicitly invalidate the key before removing the mask object from the stack
822 while (masks->count() > fStartingMaskIndex) {
823 SkASSERT(masks->back().owner() == this && proxyProvider);
824 masks->back().invalidate(proxyProvider);
825 masks->pop_back();
826 }
827 SkASSERT(masks->empty() || masks->back().genID() != fGenID);
828}
829
830void GrClipStack::SaveRecord::reset(const SkIRect& bounds) {
831 SkASSERT(this->canBeUpdated());
832 fOldestValidIndex = fStartingElementIndex;
833 fOuterBounds = bounds;
834 fInnerBounds = bounds;
835 fStackOp = SkClipOp::kIntersect;
836 fState = ClipState::kWideOpen;
837 fShader = nullptr;
838}
839
840void GrClipStack::SaveRecord::addShader(sk_sp<SkShader> shader) {
841 SkASSERT(shader);
842 SkASSERT(this->canBeUpdated());
843 if (!fShader) {
844 fShader = std::move(shader);
845 } else {
846 // The total coverage is computed by multiplying the coverage from each element (shape or
847 // shader), but since multiplication is associative, we can use kSrcIn blending to make
848 // a new shader that represents 'shader' * 'fShader'
849 fShader = SkShaders::Blend(SkBlendMode::kSrcIn, std::move(shader), fShader);
850 }
851}
852
// Incorporates 'toAdd' into this save record, updating the record's aggregate outer/inner bounds,
// stack op, and state to reflect the combination. Returns true if the clip state changed (the
// element was kept, replaced the stack, or emptied the clip), and false if the element had no
// effect. 'elements' is the shared element stack this record appends into.
bool GrClipStack::SaveRecord::addElement(RawElement&& toAdd, RawElement::Stack* elements) {
    // Validity check the element's state first; if the shape class isn't empty, the outer bounds
    // shouldn't be empty; if the inner bounds are not empty, they must be contained in outer.
    SkASSERT((toAdd.shape().isEmpty() || !toAdd.outerBounds().isEmpty()) &&
             (toAdd.innerBounds().isEmpty() || toAdd.outerBounds().contains(toAdd.innerBounds())));
    // And we shouldn't be adding an element if we have a deferred save
    SkASSERT(this->canBeUpdated());

    if (fState == ClipState::kEmpty) {
        // The clip is already empty, and we only shrink, so there's no need to record this element.
        return false;
    } else if (toAdd.shape().isEmpty()) {
        // An empty difference op should have been detected earlier, since it's a no-op
        SkASSERT(toAdd.op() == SkClipOp::kIntersect);
        fState = ClipState::kEmpty;
        return true;
    }

    // In this invocation, 'A' refers to the existing stack's bounds and 'B' refers to the new
    // element.
    switch (get_clip_geometry(*this, toAdd)) {
        case ClipGeometry::kEmpty:
            // The combination results in an empty clip
            fState = ClipState::kEmpty;
            return true;

        case ClipGeometry::kAOnly:
            // The combination would not be any different than the existing clip
            return false;

        case ClipGeometry::kBOnly:
            // The combination would invalidate the entire existing stack and can be replaced with
            // just the new element.
            this->replaceWithElement(std::move(toAdd), elements);
            return true;

        case ClipGeometry::kBoth:
            // The new element combines in a complex manner, so update the stack's bounds based on
            // the combination of its and the new element's ops (handled below)
            break;
    }

    if (fState == ClipState::kWideOpen) {
        // When the stack was wide open and the clip effect was kBoth, the "complex" manner is
        // simply to keep the element and update the stack bounds to be the element's intersected
        // with the device.
        this->replaceWithElement(std::move(toAdd), elements);
        return true;
    }

    // Some form of actual clip element(s) to combine with.
    if (fStackOp == SkClipOp::kIntersect) {
        if (toAdd.op() == SkClipOp::kIntersect) {
            // Intersect (stack) + Intersect (toAdd)
            // - Bounds updates is simply the paired intersections of outer and inner.
            SkAssertResult(fOuterBounds.intersect(toAdd.outerBounds()));
            if (!fInnerBounds.intersect(toAdd.innerBounds())) {
                // NOTE: this does the right thing if either rect is empty, since we set the
                // inner bounds to empty here
                fInnerBounds = SkIRect::MakeEmpty();
            }
        } else {
            // Intersect (stack) + Difference (toAdd)
            // - Shrink the stack's outer bounds if the difference op's inner bounds completely
            //   cuts off an edge.
            // - Shrink the stack's inner bounds to completely exclude the op's outer bounds.
            fOuterBounds = subtract(fOuterBounds, toAdd.innerBounds(), /* exact */ true);
            fInnerBounds = subtract(fInnerBounds, toAdd.outerBounds(), /* exact */ false);
        }
    } else {
        if (toAdd.op() == SkClipOp::kIntersect) {
            // Difference (stack) + Intersect (toAdd)
            // - Bounds updates are just the mirror of Intersect(stack) + Difference(toAdd)
            SkIRect oldOuter = fOuterBounds;
            fOuterBounds = subtract(toAdd.outerBounds(), fInnerBounds, /* exact */ true);
            fInnerBounds = subtract(toAdd.innerBounds(), oldOuter, /* exact */ false);
        } else {
            // Difference (stack) + Difference (toAdd)
            // - The updated outer bounds is the union of outer bounds and the inner becomes the
            //   largest of the two possible inner bounds
            fOuterBounds.join(toAdd.outerBounds());
            if (toAdd.innerBounds().width() * toAdd.innerBounds().height() >
                fInnerBounds.width() * fInnerBounds.height()) {
                fInnerBounds = toAdd.innerBounds();
            }
        }
    }

    // If we get here, we're keeping the new element and the stack's bounds have been updated.
    // We ought to have caught the cases where the stack bounds resemble an empty or wide open
    // clip, so assert that's the case.
    SkASSERT(!fOuterBounds.isEmpty() &&
             (fInnerBounds.isEmpty() || fOuterBounds.contains(fInnerBounds)));

    return this->appendElement(std::move(toAdd), elements);
}
949
// Stores 'toAdd' onto the element stack after letting it interact with every still-valid prior
// element (which may invalidate either side). Older elements a restore() could revive are only
// marked invalid; active invalidated elements can be popped or their slot reused for 'toAdd'.
// Returns false when the new element turns out to add nothing over an existing element; returns
// true otherwise (including when the interaction proves the clip empty). On success this updates
// fOldestValidIndex, fState, fStackOp, and bumps fGenID so cached masks are discarded.
bool GrClipStack::SaveRecord::appendElement(RawElement&& toAdd, RawElement::Stack* elements) {
    // Update past elements to account for the new element
    int i = elements->count() - 1;

    // After the loop, elements between [max(youngestValid, startingIndex)+1, count-1] can be
    // removed from the stack (these are the active elements that have been invalidated by the
    // newest element; since it's the active part of the stack, no restore() can bring them back).
    int youngestValid = fStartingElementIndex - 1;
    // After the loop, elements between [0, oldestValid-1] are all invalid. The value of oldestValid
    // becomes the save record's new fLastValidIndex value.
    int oldestValid = elements->count();
    // After the loop, this is the earliest active element that was invalidated. It may be
    // older in the stack than earliestValid, so cannot be popped off, but can be used to store
    // the new element instead of allocating more.
    RawElement* oldestActiveInvalid = nullptr;
    int oldestActiveInvalidIndex = elements->count();

    for (RawElement& existing : elements->ritems()) {
        if (i < fOldestValidIndex) {
            break;
        }
        // We don't need to pass the actual index that toAdd will be saved to; just the minimum
        // index of this save record, since that will result in the same restoration behavior later.
        existing.updateForElement(&toAdd, *this);

        if (toAdd.isInvalid()) {
            if (existing.isInvalid()) {
                // Both new and old invalid implies the entire clip becomes empty
                fState = ClipState::kEmpty;
                return true;
            } else {
                // The new element doesn't change the clip beyond what the old element already does
                return false;
            }
        } else if (existing.isInvalid()) {
            // The new element cancels out the old element. The new element may have been modified
            // to account for the old element's geometry.
            if (i >= fStartingElementIndex) {
                // Still active, so the invalidated index could be used to store the new element
                oldestActiveInvalid = &existing;
                oldestActiveInvalidIndex = i;
            }
        } else {
            // Keep both new and old elements
            oldestValid = i;
            if (i > youngestValid) {
                youngestValid = i;
            }
        }

        --i;
    }

    // Post-iteration validity check
    SkASSERT(oldestValid == elements->count() ||
             (oldestValid >= fOldestValidIndex && oldestValid < elements->count()));
    SkASSERT(youngestValid == fStartingElementIndex - 1 ||
             (youngestValid >= fStartingElementIndex && youngestValid < elements->count()));
    SkASSERT((oldestActiveInvalid && oldestActiveInvalidIndex >= fStartingElementIndex &&
              oldestActiveInvalidIndex < elements->count()) || !oldestActiveInvalid);

    // Update final state
    SkASSERT(oldestValid >= fOldestValidIndex);
    fOldestValidIndex = std::min(oldestValid, oldestActiveInvalidIndex);
    fState = oldestValid == elements->count() ? toAdd.clipType() : ClipState::kComplex;
    if (fStackOp == SkClipOp::kDifference && toAdd.op() == SkClipOp::kIntersect) {
        // The stack remains in difference mode only as long as all elements are difference
        fStackOp = SkClipOp::kIntersect;
    }

    int targetCount = youngestValid + 1;
    if (!oldestActiveInvalid || oldestActiveInvalidIndex >= targetCount) {
        // toAdd will be stored right after youngestValid
        targetCount++;
        oldestActiveInvalid = nullptr;
    }
    while (elements->count() > targetCount) {
        SkASSERT(oldestActiveInvalid != &elements->back()); // shouldn't delete what we'll reuse
        elements->pop_back();
    }
    if (oldestActiveInvalid) {
        *oldestActiveInvalid = std::move(toAdd);
    } else if (elements->count() < targetCount) {
        elements->push_back(std::move(toAdd));
    } else {
        elements->back() = std::move(toAdd);
    }

    // Changing this will prompt GrClipStack to invalidate any masks associated with this record.
    fGenID = next_gen_id();
    return true;
}
1042
// Discards all of this record's active elements and replaces them with just 'toAdd', whose
// bounds, op, and clip type become the record's aggregate state. Older elements owned by lower
// save records are invalidated (via fOldestValidIndex), and the gen ID is bumped so cached masks
// for this record are no longer considered valid.
void GrClipStack::SaveRecord::replaceWithElement(RawElement&& toAdd, RawElement::Stack* elements) {
    // The aggregate state of the save record mirrors the element
    fInnerBounds = toAdd.innerBounds();
    fOuterBounds = toAdd.outerBounds();
    fStackOp = toAdd.op();
    fState = toAdd.clipType();

    // All prior active element can be removed from the stack: [startingIndex, count - 1]
    int targetCount = fStartingElementIndex + 1;
    while (elements->count() > targetCount) {
        elements->pop_back();
    }
    if (elements->count() < targetCount) {
        elements->push_back(std::move(toAdd));
    } else {
        elements->back() = std::move(toAdd);
    }

    SkASSERT(elements->count() == fStartingElementIndex + 1);

    // This invalidates all older elements that are owned by save records lower in the clip stack.
    fOldestValidIndex = fStartingElementIndex;
    fGenID = next_gen_id();
}
1067
1068///////////////////////////////////////////////////////////////////////////////
1069// GrClipStack
1070
// NOTE: Based on draw calls in all GMs, SKPs, and SVGs as of 08/20, 98% use a clip stack with
// one Element and up to two SaveRecords, thus the inline size for RawElement::Stack and
// SaveRecord::Stack (this conveniently keeps the size of GrClipStack manageable). The max
// encountered element stack depth was 5 and the max save depth was 6. Using an increment of 8 for
// these stacks means that clip management will incur a single allocation for the remaining 2%
// of the draws, with extra head room for more complex clips encountered in the wild.
//
// The mask stack increment size was chosen to be smaller since only 0.2% of the evaluated draw call
// set ever used a mask (which includes stencil masks), or up to 0.3% when CCPR is disabled.
// Allocation increments for the stacks created in GrClipStack's constructor.
static constexpr int kElementStackIncrement = 8;
static constexpr int kSaveStackIncrement = 8;
static constexpr int kMaskStackIncrement = 4;

// And from this same draw call set, the most complex clip could only use 5 analytic coverage FPs.
// Historically we limited it to 4 based on Blink's call pattern, so we keep the limit as-is since
// it's so close to the empirically encountered max.
static constexpr int kMaxAnalyticFPs = 4;
// The number of stack-allocated mask pointers to store before extending the arrays.
// Stack size determined empirically, the maximum number of elements put in a SW mask was 4
// across our set of GMs, SKPs, and SVGs used for testing.
static constexpr int kNumStackMasks = 4;
1092
// Creates a clip stack for a device covering 'deviceBounds'. 'matrixProvider' is retained and
// later passed to GrFPArgs when a clip shader is converted to a fragment processor in apply();
// 'forceAA' promotes every clip element and draw query to anti-aliased handling. The proxy
// provider is left null here and captured lazily on the first apply() call.
GrClipStack::GrClipStack(const SkIRect& deviceBounds, const SkMatrixProvider* matrixProvider,
                         bool forceAA)
        : fElements(kElementStackIncrement)
        , fSaves(kSaveStackIncrement)
        , fMasks(kMaskStackIncrement)
        , fProxyProvider(nullptr)
        , fDeviceBounds(deviceBounds)
        , fMatrixProvider(matrixProvider)
        , fForceAA(forceAA) {
    // Start with a save record that is wide open
    fSaves.emplace_back(deviceBounds);
}
1105
1106GrClipStack::~GrClipStack() {
1107 // Invalidate all mask keys that remain. Since we're tearing the clip stack down, we don't need
1108 // to go through SaveRecord.
1109 SkASSERT(fProxyProvider || fMasks.empty());
1110 if (fProxyProvider) {
1111 for (Mask& m : fMasks.ritems()) {
1112 m.invalidate(fProxyProvider);
1113 }
1114 }
1115}
1116
1117void GrClipStack::save() {
1118 SkASSERT(!fSaves.empty());
1119 fSaves.back().pushSave();
1120}
1121
1122void GrClipStack::restore() {
1123 SkASSERT(!fSaves.empty());
1124 SaveRecord& current = fSaves.back();
1125 if (current.popSave()) {
1126 // This was just a deferred save being undone, so the record doesn't need to be removed yet
1127 return;
1128 }
1129
1130 // When we remove a save record, we delete all elements >= its starting index and any masks
1131 // that were rasterized for it.
1132 current.removeElements(&fElements);
1133 SkASSERT(fProxyProvider || fMasks.empty());
1134 if (fProxyProvider) {
1135 current.invalidateMasks(fProxyProvider, &fMasks);
1136 }
1137 fSaves.pop_back();
1138 // Restore any remaining elements that were only invalidated by the now-removed save record.
1139 fSaves.back().restoreElements(&fElements);
1140}
1141
1142SkIRect GrClipStack::getConservativeBounds() const {
1143 const SaveRecord& current = this->currentSaveRecord();
1144 if (current.state() == ClipState::kEmpty) {
1145 return SkIRect::MakeEmpty();
1146 } else if (current.state() == ClipState::kWideOpen) {
1147 return fDeviceBounds;
1148 } else {
1149 if (current.op() == SkClipOp::kDifference) {
1150 // The outer/inner bounds represent what's cut out, so full bounds remains the device
1151 // bounds, minus any fully clipped content that spans the device edge.
1152 return subtract(fDeviceBounds, current.innerBounds(), /* exact */ true);
1153 } else {
1154 SkASSERT(fDeviceBounds.contains(current.outerBounds()));
1155 return current.outerBounds();
1156 }
1157 }
1158}
1159
// Cheap, conservative evaluation of how the current clip affects 'bounds' without building any
// coverage FPs or masks. Returns kClippedOut/kUnclipped when that is decidable from aggregate
// state; when the stack reduces to a single device-space rect or rrect element, returns that
// shape so the caller can apply it directly; otherwise reports a generic kClipped.
GrClip::PreClipResult GrClipStack::preApply(const SkRect& bounds, GrAA aa) const {
    Draw draw(bounds, fForceAA ? GrAA::kYes : aa);
    if (!draw.applyDeviceBounds(fDeviceBounds)) {
        return GrClip::Effect::kClippedOut;
    }

    const SaveRecord& cs = this->currentSaveRecord();
    // Early out if we know a priori that the clip is full 0s or full 1s.
    if (cs.state() == ClipState::kEmpty) {
        return GrClip::Effect::kClippedOut;
    } else if (cs.state() == ClipState::kWideOpen) {
        SkASSERT(!cs.shader());
        return GrClip::Effect::kUnclipped;
    }

    // Given argument order, 'A' == current clip, 'B' == draw
    switch (get_clip_geometry(cs, draw)) {
        case ClipGeometry::kEmpty:
            // Can ignore the shader since the geometry removed everything already
            return GrClip::Effect::kClippedOut;

        case ClipGeometry::kBOnly:
            // Geometrically, the draw is unclipped, but can't ignore a shader
            return cs.shader() ? GrClip::Effect::kClipped : GrClip::Effect::kUnclipped;

        case ClipGeometry::kAOnly:
            // Shouldn't happen since the inner bounds of a draw are unknown
            SkASSERT(false);
            // But if it did, it technically means the draw covered the clip and should be
            // considered kClipped or similar, which is what the next case handles.
            [[fallthrough]];

        case ClipGeometry::kBoth: {
            SkASSERT(fElements.count() > 0);
            const RawElement& back = fElements.back();
            if (cs.state() == ClipState::kDeviceRect) {
                SkASSERT(back.clipType() == ClipState::kDeviceRect);
                return {back.shape().rect(), back.aa()};
            } else if (cs.state() == ClipState::kDeviceRRect) {
                SkASSERT(back.clipType() == ClipState::kDeviceRRect);
                return {back.shape().rrect(), back.aa()};
            } else {
                // The clip stack has complex shapes, multiple elements, or a shader; we could
                // iterate per element like we would in apply(), but preApply() is meant to be
                // conservative and efficient.
                SkASSERT(cs.state() == ClipState::kComplex);
                return GrClip::Effect::kClipped;
            }
        }
    }

    SkUNREACHABLE;
}
1213
// Applies the current clip to a draw whose device-space bounds are '*bounds', filling 'out' with
// the hard clip (scissor, window rectangles, stencil) and coverage FPs needed, and tightening
// '*bounds' to the device. Elements are applied in decreasing order of preference: skipped
// entirely, folded into the scissor/window rects, expressed as analytic coverage FPs (up to
// kMaxAnalyticFPs, disabled when user stencil settings are present), rendered through the CCPR
// clip atlas, or flattened into a single SW/stencil mask. Returns whether the draw is clipped
// out, unclipped, or clipped by the state added to 'out'.
GrClip::Effect GrClipStack::apply(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
                                  GrAAType aa, bool hasUserStencilSettings,
                                  GrAppliedClip* out, SkRect* bounds) const {
    // TODO: Once we no longer store SW masks, we don't need to sneak the provider in like this
    if (!fProxyProvider) {
        fProxyProvider = context->priv().proxyProvider();
    }
    SkASSERT(fProxyProvider == context->priv().proxyProvider());
    const GrCaps* caps = context->priv().caps();

    // Convert the bounds to a Draw and apply device bounds clipping, making our query as tight
    // as possible.
    Draw draw(*bounds, GrAA(fForceAA || aa != GrAAType::kNone));
    if (!draw.applyDeviceBounds(fDeviceBounds)) {
        return Effect::kClippedOut;
    }
    SkAssertResult(bounds->intersect(SkRect::Make(fDeviceBounds)));

    const SaveRecord& cs = this->currentSaveRecord();
    // Early out if we know a priori that the clip is full 0s or full 1s.
    if (cs.state() == ClipState::kEmpty) {
        return Effect::kClippedOut;
    } else if (cs.state() == ClipState::kWideOpen) {
        SkASSERT(!cs.shader());
        return Effect::kUnclipped;
    }

    // Convert any clip shader first, since it's not geometrically related to the draw bounds
    std::unique_ptr<GrFragmentProcessor> clipFP = nullptr;
    if (cs.shader()) {
        static const GrColorInfo kCoverageColorInfo{GrColorType::kUnknown, kPremul_SkAlphaType,
                                                    nullptr};
        GrFPArgs args(context, *fMatrixProvider, SkSamplingOptions(), &kCoverageColorInfo);
        clipFP = as_SB(cs.shader())->asFragmentProcessor(args);
        if (clipFP) {
            // The initial input is the coverage from the geometry processor, so this ensures it
            // is multiplied properly with the alpha of the clip shader.
            clipFP = GrFragmentProcessor::MulInputByChildAlpha(std::move(clipFP));
        }
    }

    // A refers to the entire clip stack, B refers to the draw
    switch (get_clip_geometry(cs, draw)) {
        case ClipGeometry::kEmpty:
            return Effect::kClippedOut;

        case ClipGeometry::kBOnly:
            // Geometrically unclipped, but may need to add the shader as a coverage FP
            if (clipFP) {
                out->addCoverageFP(std::move(clipFP));
                return Effect::kClipped;
            } else {
                return Effect::kUnclipped;
            }

        case ClipGeometry::kAOnly:
            // Shouldn't happen since draws don't report inner bounds
            SkASSERT(false);
            [[fallthrough]];

        case ClipGeometry::kBoth:
            // The draw is combined with the saved clip elements; the below logic tries to skip
            // as many elements as possible.
            SkASSERT(cs.state() == ClipState::kDeviceRect ||
                     cs.state() == ClipState::kDeviceRRect ||
                     cs.state() == ClipState::kComplex);
            break;
    }

    // We can determine a scissor based on the draw and the overall stack bounds.
    SkIRect scissorBounds;
    if (cs.op() == SkClipOp::kIntersect) {
        // Initially we keep this as large as possible; if the clip is applied solely with coverage
        // FPs then using a loose scissor increases the chance we can batch the draws.
        // We tighten it later if any form of mask or atlas element is needed.
        scissorBounds = cs.outerBounds();
    } else {
        scissorBounds = subtract(draw.outerBounds(), cs.innerBounds(), /* exact */ true);
    }

    // We mark this true once we have a coverage FP (since complex clipping is occurring), or we
    // have an element that wouldn't affect the scissored draw bounds, but does affect the regular
    // draw bounds. In that case, the scissor is sufficient for clipping and we can skip the
    // element but definitely cannot then drop the scissor.
    bool scissorIsNeeded = SkToBool(cs.shader());

    int remainingAnalyticFPs = kMaxAnalyticFPs;
    if (hasUserStencilSettings) {
        // Disable analytic clips when there are user stencil settings to ensure the clip is
        // respected in the stencil buffer.
        remainingAnalyticFPs = 0;
        // If we have user stencil settings, we shouldn't be avoiding the stencil buffer anyways.
        SkASSERT(!context->priv().caps()->avoidStencilBuffers());
    }

    // If window rectangles are supported, we can use them to exclude inner bounds of difference ops
    int maxWindowRectangles = rtc->maxWindowRectangles();
    GrWindowRectangles windowRects;

    // Elements not represented as an analytic FP or skipped will be collected here and later
    // applied by using the stencil buffer, CCPR clip atlas, or a cached SW mask.
    SkSTArray<kNumStackMasks, const Element*> elementsForMask;
    SkSTArray<kNumStackMasks, const RawElement*> elementsForAtlas;

    bool maskRequiresAA = false;
    auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();

    int i = fElements.count();
    for (const RawElement& e : fElements.ritems()) {
        --i;
        if (i < cs.oldestElementIndex()) {
            // All earlier elements have been invalidated by elements already processed
            break;
        } else if (e.isInvalid()) {
            continue;
        }

        switch (get_clip_geometry(e, draw)) {
            case ClipGeometry::kEmpty:
                // This can happen for difference op elements that have a larger fInnerBounds than
                // can be preserved at the next level.
                return Effect::kClippedOut;

            case ClipGeometry::kBOnly:
                // We don't need to produce a coverage FP or mask for the element
                break;

            case ClipGeometry::kAOnly:
                // Shouldn't happen for draws, fall through to regular element processing
                SkASSERT(false);
                [[fallthrough]];

            case ClipGeometry::kBoth: {
                // The element must apply coverage to the draw, enable the scissor to limit overdraw
                scissorIsNeeded = true;

                // First apply using HW methods (scissor and window rects). When the inner and outer
                // bounds match, nothing else needs to be done.
                bool fullyApplied = false;
                if (e.op() == SkClipOp::kIntersect) {
                    // The second test allows clipped draws that are scissored by multiple elements
                    // to remain scissor-only.
                    fullyApplied = e.innerBounds() == e.outerBounds() ||
                                   e.innerBounds().contains(scissorBounds);
                } else {
                    if (!e.innerBounds().isEmpty() && windowRects.count() < maxWindowRectangles) {
                        // TODO: If we have more difference ops than available window rects, we
                        // should prioritize those with the largest inner bounds.
                        windowRects.addWindow(e.innerBounds());
                        fullyApplied = e.innerBounds() == e.outerBounds();
                    }
                }

                if (!fullyApplied && remainingAnalyticFPs > 0) {
                    std::tie(fullyApplied, clipFP) = analytic_clip_fp(e.asElement(),
                                                                      *caps->shaderCaps(),
                                                                      std::move(clipFP));
                    if (fullyApplied) {
                        remainingAnalyticFPs--;
                    } else if (ccpr && e.aa() == GrAA::kYes) {
                        // While technically the element is turned into a mask, each atlas entry
                        // counts towards the FP complexity of the clip.
                        // TODO - CCPR needs a stable ops task ID so we can't create FPs until we
                        // know any other mask generation is finished. It also only works with AA
                        // shapes, future atlas systems can improve on this.
                        elementsForAtlas.push_back(&e);
                        remainingAnalyticFPs--;
                        fullyApplied = true;
                    }
                }

                if (!fullyApplied) {
                    elementsForMask.push_back(&e.asElement());
                    maskRequiresAA |= (e.aa() == GrAA::kYes);
                }

                break;
            }
        }
    }

    if (!scissorIsNeeded) {
        // More detailed analysis of the element shapes determined no clip is needed
        SkASSERT(elementsForMask.empty() && elementsForAtlas.empty() && !clipFP);
        return Effect::kUnclipped;
    }

    // Fill out the GrAppliedClip with what we know so far, possibly with a tightened scissor
    if (cs.op() == SkClipOp::kIntersect &&
        (!elementsForMask.empty() || !elementsForAtlas.empty())) {
        SkAssertResult(scissorBounds.intersect(draw.outerBounds()));
    }
    if (!GrClip::IsInsideClip(scissorBounds, *bounds)) {
        out->hardClip().addScissor(scissorBounds, bounds);
    }
    if (!windowRects.empty()) {
        out->hardClip().addWindowRectangles(windowRects, GrWindowRectsState::Mode::kExclusive);
    }

    // Now rasterize any remaining elements, either to the stencil or a SW mask. All elements are
    // flattened into a single mask.
    if (!elementsForMask.empty()) {
        bool stencilUnavailable = context->priv().caps()->avoidStencilBuffers() ||
                                  rtc->wrapsVkSecondaryCB();

        bool hasSWMask = false;
        if ((rtc->numSamples() <= 1 && maskRequiresAA) || stencilUnavailable) {
            // Must use a texture mask to represent the combined clip elements since the stencil
            // cannot be used, or cannot handle smooth clips.
            std::tie(hasSWMask, clipFP) = GetSWMaskFP(
                    context, &fMasks, cs, scissorBounds, elementsForMask.begin(),
                    elementsForMask.count(), std::move(clipFP));
        }

        if (!hasSWMask) {
            if (stencilUnavailable) {
                SkDebugf("WARNING: Clip mask requires stencil, but stencil unavailable. "
                         "Draw will be ignored.\n");
                return Effect::kClippedOut;
            } else {
                // Rasterize the remaining elements to the stencil buffer
                render_stencil_mask(context, rtc, cs.genID(), scissorBounds,
                                    elementsForMask.begin(), elementsForMask.count(), out);
            }
        }
    }

    // Finish CCPR paths now that the render target's ops task is stable.
    if (!elementsForAtlas.empty()) {
        uint32_t opsTaskID = rtc->getOpsTask()->uniqueID();
        for (int i = 0; i < elementsForAtlas.count(); ++i) {
            SkASSERT(elementsForAtlas[i]->aa() == GrAA::kYes);
            clipFP = clip_atlas_fp(ccpr, opsTaskID, scissorBounds, elementsForAtlas[i]->asElement(),
                                   elementsForAtlas[i]->devicePath(), *caps, std::move(clipFP));
        }
    }

    if (clipFP) {
        // This will include all analytic FPs, all CCPR atlas FPs, and a SW mask FP.
        out->addCoverageFP(std::move(clipFP));
    }

    SkASSERT(out->doesClip());
    return Effect::kClipped;
}
1459
1460GrClipStack::SaveRecord& GrClipStack::writableSaveRecord(bool* wasDeferred) {
1461 SaveRecord& current = fSaves.back();
1462 if (current.canBeUpdated()) {
1463 // Current record is still open, so it can be modified directly
1464 *wasDeferred = false;
1465 return current;
1466 } else {
1467 // Must undefer the save to get a new record.
1468 SkAssertResult(current.popSave());
1469 *wasDeferred = true;
1470 return fSaves.emplace_back(current, fMasks.count(), fElements.count());
1471 }
1472}
1473
1474void GrClipStack::clipShader(sk_sp<SkShader> shader) {
1475 // Shaders can't bring additional coverage
1476 if (this->currentSaveRecord().state() == ClipState::kEmpty) {
1477 return;
1478 }
1479
1480 bool wasDeferred;
1481 this->writableSaveRecord(&wasDeferred).addShader(std::move(shader));
1482 // Masks and geometry elements are not invalidated by updating the clip shader
1483}
1484
1485void GrClipStack::replaceClip(const SkIRect& rect) {
1486 bool wasDeferred;
1487 SaveRecord& save = this->writableSaveRecord(&wasDeferred);
1488
1489 if (!wasDeferred) {
1490 save.removeElements(&fElements);
1491 save.invalidateMasks(fProxyProvider, &fMasks);
1492 }
1493
1494 save.reset(fDeviceBounds);
1495 if (rect != fDeviceBounds) {
1496 this->clipRect(SkMatrix::I(), SkRect::Make(rect), GrAA::kNo, SkClipOp::kIntersect);
1497 }
1498}
1499
// Shared entry point for the public clip operations: simplifies 'element' against the device
// bounds, filters out no-ops, and routes the element into the writable (possibly freshly
// undeferred) save record. If the element ends up having no effect, any record created just for
// it is popped again and the deferred-save count restored.
void GrClipStack::clip(RawElement&& element) {
    // Nothing can further shrink an already-empty clip.
    if (this->currentSaveRecord().state() == ClipState::kEmpty) {
        return;
    }

    // Reduce the path to anything simpler, will apply the transform if it's a scale+translate
    // and ensures the element's bounds are clipped to the device (NOT the conservative clip bounds,
    // since those are based on the net effect of all elements while device bounds clipping happens
    // implicitly. During addElement, we may still be able to invalidate some older elements).
    element.simplify(fDeviceBounds, fForceAA);
    SkASSERT(!element.shape().inverted());

    // An empty op means do nothing (for difference), or close the save record, so we try and detect
    // that early before doing additional unnecessary save record allocation.
    if (element.shape().isEmpty()) {
        if (element.op() == SkClipOp::kDifference) {
            // If the shape is empty and we're subtracting, this has no effect on the clip
            return;
        }
        // else we will make the clip empty, but we need a new save record to record that change
        // in the clip state; fall through to below and updateForElement() will handle it.
    }

    bool wasDeferred;
    SaveRecord& save = this->writableSaveRecord(&wasDeferred);
    SkDEBUGCODE(uint32_t oldGenID = save.genID();)
    SkDEBUGCODE(int elementCount = fElements.count();)
    if (!save.addElement(std::move(element), &fElements)) {
        if (wasDeferred) {
            // We made a new save record, but ended up not adding an element to the stack.
            // So instead of keeping an empty save record around, pop it off and restore the counter
            SkASSERT(elementCount == fElements.count());
            fSaves.pop_back();
            fSaves.back().pushSave();
        } else {
            // Should not have changed gen ID if the element and save were not modified
            SkASSERT(oldGenID == save.genID());
        }
    } else {
        // The gen ID should be new, and should not be invalid
        SkASSERT(oldGenID != save.genID() && save.genID() != kInvalidGenID);
        if (fProxyProvider && !wasDeferred) {
            // We modified an active save record so any old masks it had can be invalidated
            save.invalidateMasks(fProxyProvider, &fMasks);
        }
    }
}
1547
1548GrFPResult GrClipStack::GetSWMaskFP(GrRecordingContext* context, Mask::Stack* masks,
1549 const SaveRecord& current, const SkIRect& bounds,
1550 const Element** elements, int count,
1551 std::unique_ptr<GrFragmentProcessor> clipFP) {
1552 GrProxyProvider* proxyProvider = context->priv().proxyProvider();
Brian Salomonc85bce82020-12-29 09:32:52 -05001553 GrSurfaceProxyView maskProxy;
Michael Ludwiga195d102020-09-15 14:51:52 -04001554
1555 SkIRect maskBounds; // may not be 'bounds' if we reuse a large clip mask
1556 // Check the existing masks from this save record for compatibility
1557 for (const Mask& m : masks->ritems()) {
1558 if (m.genID() != current.genID()) {
1559 break;
1560 }
1561 if (m.appliesToDraw(current, bounds)) {
Brian Salomonc85bce82020-12-29 09:32:52 -05001562 maskProxy = proxyProvider->findCachedProxyWithColorTypeFallback(
1563 m.key(), kMaskOrigin, GrColorType::kAlpha_8, 1);
1564 if (maskProxy) {
Michael Ludwiga195d102020-09-15 14:51:52 -04001565 maskBounds = m.bounds();
1566 break;
1567 }
1568 }
1569 }
1570
Brian Salomonc85bce82020-12-29 09:32:52 -05001571 if (!maskProxy) {
Michael Ludwiga195d102020-09-15 14:51:52 -04001572 // No existing mask was found, so need to render a new one
Brian Salomonc85bce82020-12-29 09:32:52 -05001573 maskProxy = render_sw_mask(context, bounds, elements, count);
1574 if (!maskProxy) {
Michael Ludwiga195d102020-09-15 14:51:52 -04001575 // If we still don't have one, there's nothing we can do
1576 return GrFPFailure(std::move(clipFP));
1577 }
1578
1579 // Register the mask for later invalidation
1580 Mask& mask = masks->emplace_back(current, bounds);
Brian Salomonc85bce82020-12-29 09:32:52 -05001581 proxyProvider->assignUniqueKeyToProxy(mask.key(), maskProxy.asTextureProxy());
Michael Ludwiga195d102020-09-15 14:51:52 -04001582 maskBounds = bounds;
1583 }
1584
1585 // Wrap the mask in an FP that samples it for coverage
Brian Salomonc85bce82020-12-29 09:32:52 -05001586 SkASSERT(maskProxy && maskProxy.origin() == kMaskOrigin);
Michael Ludwiga195d102020-09-15 14:51:52 -04001587
1588 GrSamplerState samplerState(GrSamplerState::WrapMode::kClampToBorder,
1589 GrSamplerState::Filter::kNearest);
1590 // Maps the device coords passed to the texture effect to the top-left corner of the mask, and
1591 // make sure that the draw bounds are pre-mapped into the mask's space as well.
1592 auto m = SkMatrix::Translate(-maskBounds.fLeft, -maskBounds.fTop);
1593 auto subset = SkRect::Make(bounds);
1594 subset.offset(-maskBounds.fLeft, -maskBounds.fTop);
1595 // We scissor to bounds. The mask's texel centers are aligned to device space
1596 // pixel centers. Hence this domain of texture coordinates.
1597 auto domain = subset.makeInset(0.5, 0.5);
Brian Salomonc85bce82020-12-29 09:32:52 -05001598 auto fp = GrTextureEffect::MakeSubset(std::move(maskProxy), kPremul_SkAlphaType, m,
1599 samplerState, subset, domain, *context->priv().caps());
Michael Ludwiga195d102020-09-15 14:51:52 -04001600 fp = GrDeviceSpaceEffect::Make(std::move(fp));
1601
1602 // Must combine the coverage sampled from the texture effect with the previous coverage
Brian Salomonb43d6992021-01-05 14:37:40 -05001603 fp = GrBlendFragmentProcessor::Make(std::move(fp), std::move(clipFP), SkBlendMode::kDstIn);
Michael Ludwiga195d102020-09-15 14:51:52 -04001604 return GrFPSuccess(std::move(fp));
1605}