1/*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/gpu/GrClipStack.h"
9
10#include "include/core/SkMatrix.h"
11#include "src/core/SkRRectPriv.h"
12#include "src/core/SkRectPriv.h"
13#include "src/core/SkTaskGroup.h"
14#include "src/gpu/GrClip.h"
15#include "src/gpu/GrDeferredProxyUploader.h"
16#include "src/gpu/GrDirectContextPriv.h"
17#include "src/gpu/GrProxyProvider.h"
18#include "src/gpu/GrRecordingContextPriv.h"
19#include "src/gpu/GrSWMaskHelper.h"
20#include "src/gpu/GrStencilMaskHelper.h"
21#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
22#include "src/gpu/effects/GrBlendFragmentProcessor.h"
23#include "src/gpu/effects/GrConvexPolyEffect.h"
24#include "src/gpu/effects/GrRRectEffect.h"
25#include "src/gpu/effects/GrTextureEffect.h"
26#include "src/gpu/effects/generated/GrAARectEffect.h"
27#include "src/gpu/effects/generated/GrDeviceSpaceEffect.h"
28#include "src/gpu/geometry/GrQuadUtils.h"
29
30namespace {
31
32// This captures which of the two elements in (A op B) would be required when they are combined,
33// where op is intersect or difference.
34enum class ClipGeometry {
35 kEmpty,
36 kAOnly,
37 kBOnly,
38 kBoth
39};
40
41// A and B can be Element, SaveRecord, or Draw. Supported combinations are, order not mattering,
42// (Element, Element), (Element, SaveRecord), (Element, Draw), and (SaveRecord, Draw).
43template<typename A, typename B>
44static ClipGeometry get_clip_geometry(const A& a, const B& b) {
45 // NOTE: SkIRect::Intersects() returns false when two rectangles touch at an edge (so the result
46 // is empty). This behavior is desired for the following clip effect policies.
47 if (a.op() == SkClipOp::kIntersect) {
48 if (b.op() == SkClipOp::kIntersect) {
49 // Intersect (A) + Intersect (B)
50 if (!SkIRect::Intersects(a.outerBounds(), b.outerBounds())) {
51 // Regions with non-zero coverage are disjoint, so intersection = empty
52 return ClipGeometry::kEmpty;
53 } else if (b.contains(a)) {
54 // B's full coverage region contains entirety of A, so intersection = A
55 return ClipGeometry::kAOnly;
56 } else if (a.contains(b)) {
57 // A's full coverage region contains entirety of B, so intersection = B
58 return ClipGeometry::kBOnly;
59 } else {
60 // The shapes intersect in some non-trivial manner
61 return ClipGeometry::kBoth;
62 }
63 } else {
64 SkASSERT(b.op() == SkClipOp::kDifference);
65 // Intersect (A) + Difference (B)
66 if (!SkIRect::Intersects(a.outerBounds(), b.outerBounds())) {
67 // A only intersects B's full coverage region, so intersection = A
68 return ClipGeometry::kAOnly;
69 } else if (b.contains(a)) {
70 // B's zero coverage region completely contains A, so intersection = empty
71 return ClipGeometry::kEmpty;
72 } else {
73 // Intersection cannot be simplified. Note that the combination of an intersect
74 // and difference op in this order cannot produce kBOnly
75 return ClipGeometry::kBoth;
76 }
77 }
78 } else {
79 SkASSERT(a.op() == SkClipOp::kDifference);
80 if (b.op() == SkClipOp::kIntersect) {
81 // Difference (A) + Intersect (B) - the mirror of Intersect(A) + Difference(B),
82 // but combining is commutative so this is equivalent barring naming.
83 if (!SkIRect::Intersects(b.outerBounds(), a.outerBounds())) {
84 // B only intersects A's full coverage region, so intersection = B
85 return ClipGeometry::kBOnly;
86 } else if (a.contains(b)) {
87 // A's zero coverage region completely contains B, so intersection = empty
88 return ClipGeometry::kEmpty;
89 } else {
90 // Cannot be simplified
91 return ClipGeometry::kBoth;
92 }
93 } else {
94 SkASSERT(b.op() == SkClipOp::kDifference);
95 // Difference (A) + Difference (B)
96 if (a.contains(b)) {
97 // A's zero coverage region contains B, so B doesn't remove any extra
98 // coverage from their intersection.
99 return ClipGeometry::kAOnly;
100 } else if (b.contains(a)) {
101 // Mirror of the above case, intersection = B instead
102 return ClipGeometry::kBOnly;
103 } else {
104 // Intersection of the two differences cannot be simplified. Note that for
105 // this op combination it is not possible to produce kEmpty.
106 return ClipGeometry::kBoth;
107 }
108 }
109 }
110}
111
112// a.contains(b) where a's local space is defined by 'aToDevice', and b's possibly separate local
113// space is defined by 'bToDevice'. 'a' and 'b' geometry are provided in their local spaces.
114// Automatically takes into account if the anti-aliasing policies differ. When the policies match,
115// we assume that coverage AA or GPU's non-AA rasterization will apply to A and B equivalently, so
116// we can compare the original shapes. When the modes are mixed, we outset B in device space first.
117static bool shape_contains_rect(
118 const GrShape& a, const SkMatrix& aToDevice, const SkMatrix& deviceToA,
119 const SkRect& b, const SkMatrix& bToDevice, bool mixedAAMode) {
120 if (!a.convex()) {
121 return false;
122 }
123
124 if (!mixedAAMode && aToDevice == bToDevice) {
125 // A and B are in the same coordinate space, so don't bother mapping
126 return a.conservativeContains(b);
127 } else if (bToDevice.isIdentity() && aToDevice.preservesAxisAlignment()) {
128 // Optimize the common case of draws (B, with identity matrix) and axis-aligned shapes,
129 // instead of checking the four corners separately.
130 SkRect bInA = b;
131 if (mixedAAMode) {
132 bInA.outset(0.5f, 0.5f);
133 }
134 SkAssertResult(deviceToA.mapRect(&bInA));
135 return a.conservativeContains(bInA);
136 }
137
138 // Test each corner for contains; since a is convex, if all 4 corners of b's bounds are
139 // contained, then the entirety of b is within a.
140 GrQuad deviceQuad = GrQuad::MakeFromRect(b, bToDevice);
141 if (any(deviceQuad.w4f() < SkPathPriv::kW0PlaneDistance)) {
142 // Something in B actually projects behind the W = 0 plane and would be clipped to infinity,
143 // so it's extremely unlikely that A can contain B.
144 return false;
145 }
146 if (mixedAAMode) {
147 // Outset it so its edges are 1/2px out, giving us a buffer to avoid cases where a non-AA
148 // clip or draw would snap outside an aa element.
149 GrQuadUtils::Outset({0.5f, 0.5f, 0.5f, 0.5f}, &deviceQuad);
150 }
151
152 for (int i = 0; i < 4; ++i) {
153 SkPoint cornerInA = deviceQuad.point(i);
154 deviceToA.mapPoints(&cornerInA, 1);
155 if (!a.conservativeContains(cornerInA)) {
156 return false;
157 }
158 }
159
160 return true;
161}
162
163static SkIRect subtract(const SkIRect& a, const SkIRect& b, bool exact) {
164 SkIRect diff;
165 if (SkRectPriv::Subtract(a, b, &diff) || !exact) {
166 // Either A-B is exactly the rectangle stored in diff, or we don't need an exact answer
167 // and can settle for the subrect of A excluded from B (which is also 'diff')
168 return diff;
169 } else {
170 // For our purposes, we want the original A when A-B cannot be exactly represented
171 return a;
172 }
173}
174
175static GrClipEdgeType get_clip_edge_type(SkClipOp op, GrAA aa) {
176 if (op == SkClipOp::kIntersect) {
177 return aa == GrAA::kYes ? GrClipEdgeType::kFillAA : GrClipEdgeType::kFillBW;
178 } else {
179 return aa == GrAA::kYes ? GrClipEdgeType::kInverseFillAA : GrClipEdgeType::kInverseFillBW;
180 }
181}
182
183static uint32_t kInvalidGenID = 0;
184static uint32_t kEmptyGenID = 1;
185static uint32_t kWideOpenGenID = 2;
186
187static uint32_t next_gen_id() {
188 // 0-2 are reserved for invalid, empty & wide-open
189 static const uint32_t kFirstUnreservedGenID = 3;
190 static std::atomic<uint32_t> nextID{kFirstUnreservedGenID};
191
192 uint32_t id;
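    // Loop in case the atomic counter wraps back around into the reserved ID range.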
193 do {
194 id = nextID.fetch_add(1, std::memory_order_relaxed);
195 } while (id < kFirstUnreservedGenID);
196 return id;
197}
198
199// Functions for rendering / applying clip shapes in various ways
200// The general strategy is:
201// - Represent the clip element as an analytic FP that tests sk_FragCoord vs. its device shape
202// - Render the clip element to the stencil, if stencil is allowed and supports the AA, and the
203// size of the element indicates stenciling will be worth it, vs. making a mask.
204// - Try to put the individual element into a clip atlas, which is then sampled during the draw
205// - Render the element into a SW mask and upload it. If possible, the SW rasterization happens
206// in parallel.
207static constexpr GrSurfaceOrigin kMaskOrigin = kTopLeft_GrSurfaceOrigin;
208
209static GrFPResult analytic_clip_fp(const GrClipStack::Element& e,
210 const GrShaderCaps& caps,
211 std::unique_ptr<GrFragmentProcessor> fp) {
212 // All analytic clip shape FPs need to be in device space
213 GrClipEdgeType edgeType = get_clip_edge_type(e.fOp, e.fAA);
214 if (e.fLocalToDevice.isIdentity()) {
215 if (e.fShape.isRect()) {
216 return GrFPSuccess(GrAARectEffect::Make(std::move(fp), edgeType, e.fShape.rect()));
217 } else if (e.fShape.isRRect()) {
218 return GrRRectEffect::Make(std::move(fp), edgeType, e.fShape.rrect(), caps);
219 }
220 }
221
222 // A convex hull can be transformed into device space (this will handle rect shapes with a
223 // non-identity transform).
224 if (e.fShape.segmentMask() == SkPath::kLine_SegmentMask && e.fShape.convex()) {
225 SkPath devicePath;
226 e.fShape.asPath(&devicePath);
227 devicePath.transform(e.fLocalToDevice);
228 return GrConvexPolyEffect::Make(std::move(fp), edgeType, devicePath);
229 }
230
231 return GrFPFailure(std::move(fp));
232}
233
234// TODO: Currently this only works with CCPR because CCPR owns and manages the clip atlas. The
235// high-level concept should be generalized to support any path renderer going into a shared atlas.
236static std::unique_ptr<GrFragmentProcessor> clip_atlas_fp(GrCoverageCountingPathRenderer* ccpr,
237 uint32_t opsTaskID,
238 const SkIRect& bounds,
239 const GrClipStack::Element& e,
240 SkPath* devicePath,
241 const GrCaps& caps,
242 std::unique_ptr<GrFragmentProcessor> fp) {
243 // TODO: Currently the atlas manages device-space paths, so we have to transform by the ctm.
244 // In the future, the atlas manager should see the local path and the ctm so that it can
245 // cache across integer-only translations (internally, it already does this, just not exposed).
246 if (devicePath->isEmpty()) {
247 e.fShape.asPath(devicePath);
248 devicePath->transform(e.fLocalToDevice);
249 SkASSERT(!devicePath->isEmpty());
250 }
251
252 SkASSERT(!devicePath->isInverseFillType());
253 if (e.fOp == SkClipOp::kIntersect) {
254 return ccpr->makeClipProcessor(std::move(fp), opsTaskID, *devicePath, bounds, caps);
255 } else {
256 // Use kDstOut to convert the non-inverted mask alpha into (1-alpha), so the atlas only
257 // ever renders non-inverse filled paths.
258 // - When the input FP is null, this turns into "(1-sample(ccpr, 1).a) * input"
259 // - When not null, it works out to
260 // (1-sample(ccpr, input.rgb1).a) * sample(fp, input.rgb1) * input.a
261 // - Since clips only care about the alpha channel, these are both equivalent to the
262 // desired product of (1-ccpr) * fp * input.a.
263 return GrBlendFragmentProcessor::Make(
264 ccpr->makeClipProcessor(nullptr, opsTaskID, *devicePath, bounds, caps), // src
265 std::move(fp), // dst
266 SkBlendMode::kDstOut);
267 }
268}
269
270static void draw_to_sw_mask(GrSWMaskHelper* helper, const GrClipStack::Element& e, bool clearMask) {
271 // If the first element to draw is an intersect, we clear to 0 and will draw it directly with
272 // coverage 1 (subsequent intersect elements will be inverse-filled and draw 0 outside).
273 // If the first element to draw is a difference, we clear to 1, and in all cases we draw the
274 // difference element directly with coverage 0.
275 if (clearMask) {
276 helper->clear(e.fOp == SkClipOp::kIntersect ? 0x00 : 0xFF);
277 }
278
279 uint8_t alpha;
280 bool invert;
281 if (e.fOp == SkClipOp::kIntersect) {
282 // Intersect modifies pixels outside of its geometry. If this isn't the first op, we
283 // draw the inverse-filled shape with 0 coverage to erase everything outside the element.
284 // But if we are the first element, we can draw directly with coverage 1 since we
285 // cleared to 0.
286 if (clearMask) {
287 alpha = 0xFF;
288 invert = false;
289 } else {
290 alpha = 0x00;
291 invert = true;
292 }
293 } else {
294 // For difference ops, can always just subtract the shape directly by drawing 0 coverage
295 SkASSERT(e.fOp == SkClipOp::kDifference);
296 alpha = 0x00;
297 invert = false;
298 }
299
300 // Draw the shape; based on how we've initialized the buffer and chosen alpha+invert,
301 // every element is drawn with the kReplace_Op
302 if (invert) {
303 // Must invert the path
304 SkASSERT(!e.fShape.inverted());
305 // TODO: this is an extra copy effectively, just so we can toggle inversion; would be
306 // better perhaps to just call a drawPath() since we know it'll use path rendering w/
307 // the inverse fill type.
308 GrShape inverted(e.fShape);
309 inverted.setInverted(true);
310 helper->drawShape(inverted, e.fLocalToDevice, SkRegion::kReplace_Op, e.fAA, alpha);
311 } else {
312 helper->drawShape(e.fShape, e.fLocalToDevice, SkRegion::kReplace_Op, e.fAA, alpha);
313 }
314}
315
316static GrSurfaceProxyView render_sw_mask(GrRecordingContext* context, const SkIRect& bounds,
317 const GrClipStack::Element** elements, int count) {
318 SkASSERT(count > 0);
319
320 SkTaskGroup* taskGroup = nullptr;
321 if (auto direct = context->asDirectContext()) {
322 taskGroup = direct->priv().getTaskGroup();
323 }
324
325 if (taskGroup) {
326 const GrCaps* caps = context->priv().caps();
327 GrProxyProvider* proxyProvider = context->priv().proxyProvider();
328
329 // Create our texture proxy
330 GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
331 GrRenderable::kNo);
332
333 GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, GrColorType::kAlpha_8);
334 auto proxy = proxyProvider->createProxy(format, bounds.size(), GrRenderable::kNo, 1,
335 GrMipMapped::kNo, SkBackingFit::kApprox,
336 SkBudgeted::kYes, GrProtected::kNo);
337
338 // Since this will be rendered on another thread, make a copy of the elements in case
339 // the clip stack is modified on the main thread
340 using Uploader = GrTDeferredProxyUploader<SkTArray<GrClipStack::Element>>;
341 std::unique_ptr<Uploader> uploader = std::make_unique<Uploader>(count);
342 for (int i = 0; i < count; ++i) {
343 uploader->data().push_back(*(elements[i]));
344 }
345
346 Uploader* uploaderRaw = uploader.get();
347 auto drawAndUploadMask = [uploaderRaw, bounds] {
348 TRACE_EVENT0("skia.gpu", "Threaded SW Clip Mask Render");
349 GrSWMaskHelper helper(uploaderRaw->getPixels());
350 if (helper.init(bounds)) {
351 for (int i = 0; i < uploaderRaw->data().count(); ++i) {
352 draw_to_sw_mask(&helper, uploaderRaw->data()[i], i == 0);
353 }
354 } else {
355 SkDEBUGFAIL("Unable to allocate SW clip mask.");
356 }
357 uploaderRaw->signalAndFreeData();
358 };
359
360 taskGroup->add(std::move(drawAndUploadMask));
361 proxy->texPriv().setDeferredUploader(std::move(uploader));
362
363 return {std::move(proxy), kMaskOrigin, swizzle};
364 } else {
365 GrSWMaskHelper helper;
366 if (!helper.init(bounds)) {
367 return {};
368 }
369
370 for (int i = 0; i < count; ++i) {
371 draw_to_sw_mask(&helper, *(elements[i]), i == 0);
372 }
373
374 return helper.toTextureView(context, SkBackingFit::kApprox);
375 }
376}
377
378static void render_stencil_mask(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
379 uint32_t genID, const SkIRect& bounds,
380 const GrClipStack::Element** elements, int count,
381 GrAppliedClip* out) {
382 GrStencilMaskHelper helper(context, rtc);
383 if (helper.init(bounds, genID, out->windowRectsState().windows(), 0)) {
384 // This follows the same logic as in draw_sw_mask
385 bool startInside = elements[0]->fOp == SkClipOp::kDifference;
386 helper.clear(startInside);
387 for (int i = 0; i < count; ++i) {
388 const GrClipStack::Element& e = *(elements[i]);
389 SkRegion::Op op;
390 if (e.fOp == SkClipOp::kIntersect) {
391 op = (i == 0) ? SkRegion::kReplace_Op : SkRegion::kIntersect_Op;
392 } else {
393 op = SkRegion::kDifference_Op;
394 }
395 helper.drawShape(e.fShape, e.fLocalToDevice, op, e.fAA);
396 }
397 helper.finish();
398 }
399 out->hardClip().addStencilClip(genID);
400}
401
402} // anonymous namespace
403
404class GrClipStack::Draw {
405public:
406 Draw(const SkRect& drawBounds, GrAA aa)
407 : fBounds(GrClip::GetPixelIBounds(drawBounds, aa, BoundsType::kExterior))
408 , fAA(aa) {
409 // Be slightly more forgiving on whether or not a draw is inside a clip element.
410 fOriginalBounds = drawBounds.makeInset(GrClip::kBoundsTolerance, GrClip::kBoundsTolerance);
411 if (fOriginalBounds.isEmpty()) {
412 fOriginalBounds = drawBounds;
413 }
414 }
415
416 // Common clip type interface
417 SkClipOp op() const { return SkClipOp::kIntersect; }
418 const SkIRect& outerBounds() const { return fBounds; }
419
420 // Draw does not have inner bounds so cannot contain anything.
421 bool contains(const RawElement& e) const { return false; }
422 bool contains(const SaveRecord& s) const { return false; }
423
424 bool applyDeviceBounds(const SkIRect& deviceBounds) {
425 return fBounds.intersect(deviceBounds);
426 }
427
428 const SkRect& bounds() const { return fOriginalBounds; }
429 GrAA aa() const { return fAA; }
430
431private:
432 SkRect fOriginalBounds;
433 SkIRect fBounds;
434 GrAA fAA;
435};
436
437///////////////////////////////////////////////////////////////////////////////
438// GrClipStack::Element
439
440GrClipStack::RawElement::RawElement(const SkMatrix& localToDevice, const GrShape& shape,
441 GrAA aa, SkClipOp op)
442 : Element{shape, localToDevice, op, aa}
443 , fInnerBounds(SkIRect::MakeEmpty())
444 , fOuterBounds(SkIRect::MakeEmpty())
445 , fInvalidatedByIndex(-1) {
446 if (!localToDevice.invert(&fDeviceToLocal)) {
447 // If the transform can't be inverted, it means that two dimensions are collapsed to 0 or
448 // 1 dimension, making the device-space geometry effectively empty.
449 fShape.reset();
450 }
451}
452
453void GrClipStack::RawElement::markInvalid(const SaveRecord& current) {
454 SkASSERT(!this->isInvalid());
455 fInvalidatedByIndex = current.firstActiveElementIndex();
456}
457
458void GrClipStack::RawElement::restoreValid(const SaveRecord& current) {
459 if (current.firstActiveElementIndex() < fInvalidatedByIndex) {
460 fInvalidatedByIndex = -1;
461 }
462}
463
464bool GrClipStack::RawElement::contains(const Draw& d) const {
465 if (fInnerBounds.contains(d.outerBounds())) {
466 return true;
467 } else {
468 // If the draw is non-AA, use the already computed outer bounds so we don't need to use
469 // device-space outsetting inside shape_contains_rect.
470 SkRect queryBounds = d.aa() == GrAA::kYes ? d.bounds() : SkRect::Make(d.outerBounds());
471 return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
472 queryBounds, SkMatrix::I(), /* mixed-aa */ false);
473 }
474}
475
476bool GrClipStack::RawElement::contains(const SaveRecord& s) const {
477 if (fInnerBounds.contains(s.outerBounds())) {
478 return true;
479 } else {
480 // This is very similar to contains(Draw) but we just have outerBounds to work with.
481 SkRect queryBounds = SkRect::Make(s.outerBounds());
482 return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
483 queryBounds, SkMatrix::I(), /* mixed-aa */ false);
484 }
485}
486
487bool GrClipStack::RawElement::contains(const RawElement& e) const {
488 // This is similar to how RawElement checks containment for a Draw, except that both the tester
489 // and testee have a transform that needs to be considered.
490 if (fInnerBounds.contains(e.fOuterBounds)) {
491 return true;
492 }
493
494 bool mixedAA = fAA != e.fAA;
495 if (!mixedAA && fLocalToDevice == e.fLocalToDevice) {
496 // Test the shapes directly against each other, with a special check for a rrect+rrect
497 // containment (a intersect b == a implies b contains a) and paths (same gen ID, or same
498 // path for small paths means they contain each other).
499 static constexpr int kMaxPathComparePoints = 16;
500 if (fShape.isRRect() && e.fShape.isRRect()) {
501 return SkRRectPriv::ConservativeIntersect(fShape.rrect(), e.fShape.rrect())
502 == e.fShape.rrect();
503 } else if (fShape.isPath() && e.fShape.isPath()) {
504 return fShape.path().getGenerationID() == e.fShape.path().getGenerationID() ||
505 (fShape.path().getPoints(nullptr, 0) <= kMaxPathComparePoints &&
506 fShape.path() == e.fShape.path());
507 } // else fall through to shape_contains_rect
508 }
509
510 return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
511 e.fShape.bounds(), e.fLocalToDevice, mixedAA);
512
513}
514
515void GrClipStack::RawElement::simplify(const SkIRect& deviceBounds, bool forceAA) {
516 // Make sure the shape is not inverted. An inverted shape is equivalent to a non-inverted shape
517 // with the clip op toggled.
518 if (fShape.inverted()) {
519 fOp = fOp == SkClipOp::kIntersect ? SkClipOp::kDifference : SkClipOp::kIntersect;
520 fShape.setInverted(false);
521 }
522
523 // Then simplify the base shape, if it becomes empty, no need to update the bounds
524 fShape.simplify();
525 SkASSERT(!fShape.inverted());
526 if (fShape.isEmpty()) {
527 return;
528 }
529
530 // Lines and points should have been turned into empty since we assume everything is filled
531 SkASSERT(!fShape.isPoint() && !fShape.isLine());
532 // Validity check, we have no public API to create an arc at the moment
533 SkASSERT(!fShape.isArc());
534
535 SkRect outer = fLocalToDevice.mapRect(fShape.bounds());
536 if (!outer.intersect(SkRect::Make(deviceBounds))) {
537 // A non-empty shape is offscreen, so treat it as empty
538 fShape.reset();
539 return;
540 }
541
542 // Except for axis-aligned clip rects, upgrade to AA when forced. We skip axis-aligned clip
543 // rects because a non-AA axis aligned rect can always be set as just a scissor test or window
544 // rect, avoiding an expensive stencil mask generation.
545 if (forceAA && !(fShape.isRect() && fLocalToDevice.preservesAxisAlignment())) {
546 fAA = GrAA::kYes;
547 }
548
549 // Except for non-AA axis-aligned rects, the outer bounds is the rounded-out device-space
550 // mapped bounds of the shape.
551 fOuterBounds = GrClip::GetPixelIBounds(outer, fAA, BoundsType::kExterior);
552
553 if (fLocalToDevice.preservesAxisAlignment()) {
554 if (fShape.isRect()) {
555 // The actual geometry can be updated to the device-intersected bounds and we can
556 // know the inner bounds
557 fShape.rect() = outer;
558 fLocalToDevice.setIdentity();
559 fDeviceToLocal.setIdentity();
560
561 if (fAA == GrAA::kNo && outer.width() >= 1.f && outer.height() >= 1.f) {
562 // NOTE: Legacy behavior to avoid performance regressions. For non-aa axis-aligned
563 // clip rects we always just round so that they can be scissor-only (avoiding the
564 // uncertainty in how a GPU might actually round an edge on fractional coords).
565 fOuterBounds = outer.round();
566 fInnerBounds = fOuterBounds;
567 } else {
568 fInnerBounds = GrClip::GetPixelIBounds(outer, fAA, BoundsType::kInterior);
569 SkASSERT(fOuterBounds.contains(fInnerBounds) || fInnerBounds.isEmpty());
570 }
571 } else if (fShape.isRRect()) {
572 // Can't transform in place and must still check transform result since some very
573 // ill-formed scale+translate matrices can cause invalid rrect radii.
574 SkRRect src;
575 if (fShape.rrect().transform(fLocalToDevice, &src)) {
576 fShape.rrect() = src;
577 fLocalToDevice.setIdentity();
578 fDeviceToLocal.setIdentity();
579
580 SkRect inner = SkRRectPriv::InnerBounds(fShape.rrect());
581 fInnerBounds = GrClip::GetPixelIBounds(inner, fAA, BoundsType::kInterior);
582 if (!fInnerBounds.intersect(deviceBounds)) {
583 fInnerBounds = SkIRect::MakeEmpty();
584 }
585 }
586 }
587 }
588
589 if (fOuterBounds.isEmpty()) {
590 // This can happen if we have non-AA shapes smaller than a pixel that do not cover a pixel
591 // center. We could round out, but rasterization would still result in an empty clip.
592 fShape.reset();
593 }
594
595 // Post-conditions on inner and outer bounds
596 SkASSERT(fShape.isEmpty() || (!fOuterBounds.isEmpty() && deviceBounds.contains(fOuterBounds)));
597 SkASSERT(fShape.isEmpty() || fInnerBounds.isEmpty() || fOuterBounds.contains(fInnerBounds));
598}
599
600bool GrClipStack::RawElement::combine(const RawElement& other, const SaveRecord& current) {
601 // To reduce the number of possibilities, only consider intersect+intersect. Difference and
602 // mixed op cases could be analyzed to simplify one of the shapes, but that is a rare
603 // occurrence and the math is much more complicated.
604 if (other.fOp != SkClipOp::kIntersect || fOp != SkClipOp::kIntersect) {
605 return false;
606 }
607
608 // At the moment, only rect+rect or rrect+rrect are supported (although rect+rrect is
609 // treated as a degenerate case of rrect+rrect).
610 bool shapeUpdated = false;
611 if (fShape.isRect() && other.fShape.isRect()) {
612 bool aaMatch = fAA == other.fAA;
613 if (fLocalToDevice.isIdentity() && other.fLocalToDevice.isIdentity() && !aaMatch) {
614 if (GrClip::IsPixelAligned(fShape.rect())) {
615 // Our AA type doesn't really matter, take other's since its edges may not be
616 // pixel aligned, so after intersection clip behavior should respect its aa type.
617 fAA = other.fAA;
618 } else if (!GrClip::IsPixelAligned(other.fShape.rect())) {
619 // Neither shape is pixel aligned and AA types don't match so can't combine
620 return false;
621 }
622 // Either we've updated this->fAA to actually match, or other->fAA doesn't matter so
623 // this can be set to true. We just can't modify other to set its aa to this->fAA.
624 // But since 'this' becomes the combo of the two, other will be deleted so that's fine.
625 aaMatch = true;
626 }
627
628 if (aaMatch && fLocalToDevice == other.fLocalToDevice) {
629 if (!fShape.rect().intersect(other.fShape.rect())) {
630 // By floating point, it turns out the combination should be empty
631 this->fShape.reset();
632 this->markInvalid(current);
633 return true;
634 }
635 shapeUpdated = true;
636 }
637 } else if ((fShape.isRect() || fShape.isRRect()) &&
638 (other.fShape.isRect() || other.fShape.isRRect())) {
639 // No such pixel-aligned disregard for AA for round rects
640 if (fAA == other.fAA && fLocalToDevice == other.fLocalToDevice) {
641 // Treat rrect+rect intersections as rrect+rrect
642 SkRRect a = fShape.isRect() ? SkRRect::MakeRect(fShape.rect()) : fShape.rrect();
643 SkRRect b = other.fShape.isRect() ? SkRRect::MakeRect(other.fShape.rect())
644 : other.fShape.rrect();
645
646 SkRRect joined = SkRRectPriv::ConservativeIntersect(a, b);
647 if (!joined.isEmpty()) {
648 // Can reduce to a single element
649 if (joined.isRect()) {
650 // And with a simplified type
651 fShape.setRect(joined.rect());
652 } else {
653 fShape.setRRect(joined);
654 }
655 shapeUpdated = true;
656 } else if (!a.getBounds().intersects(b.getBounds())) {
657 // Like the rect+rect combination, the intersection is actually empty
658 fShape.reset();
659 this->markInvalid(current);
660 return true;
661 }
662 }
663 }
664
665 if (shapeUpdated) {
666 // This logic works under the assumption that both combined elements were intersect, so we
667 // don't do the full bounds computations like in simplify().
668 SkASSERT(fOp == SkClipOp::kIntersect && other.fOp == SkClipOp::kIntersect);
669 SkAssertResult(fOuterBounds.intersect(other.fOuterBounds));
670 if (!fInnerBounds.intersect(other.fInnerBounds)) {
671 fInnerBounds = SkIRect::MakeEmpty();
672 }
673 return true;
674 } else {
675 return false;
676 }
677}
678
679void GrClipStack::RawElement::updateForElement(RawElement* added, const SaveRecord& current) {
680 if (this->isInvalid()) {
681 // Already doesn't do anything, so skip this element
682 return;
683 }
684
685 // 'A' refers to this element, 'B' refers to 'added'.
686 switch (get_clip_geometry(*this, *added)) {
687 case ClipGeometry::kEmpty:
688 // Mark both elements as invalid to signal that the clip is fully empty
689 this->markInvalid(current);
690 added->markInvalid(current);
691 break;
692
693 case ClipGeometry::kAOnly:
694 // This element already clips more than 'added', so mark 'added' as invalid to skip it
695 added->markInvalid(current);
696 break;
697
698 case ClipGeometry::kBOnly:
699 // 'added' clips more than this element, so mark this as invalid
700 this->markInvalid(current);
701 break;
702
703 case ClipGeometry::kBoth:
704 // Else the bounds checks think we need to keep both, but depending on the combination
705 // of the ops and shape kinds, we may be able to do better.
706 if (added->combine(*this, current)) {
707 // 'added' now fully represents the combination of the two elements
708 this->markInvalid(current);
709 }
710 break;
711 }
712}
713
714GrClipStack::ClipState GrClipStack::RawElement::clipType() const {
715 // Map from the internal shape kind to the clip state enum
716 switch (fShape.type()) {
717 case GrShape::Type::kEmpty:
718 return ClipState::kEmpty;
719
720 case GrShape::Type::kRect:
721 return fOp == SkClipOp::kIntersect && fLocalToDevice.isIdentity()
722 ? ClipState::kDeviceRect : ClipState::kComplex;
723
724 case GrShape::Type::kRRect:
725 return fOp == SkClipOp::kIntersect && fLocalToDevice.isIdentity()
726 ? ClipState::kDeviceRRect : ClipState::kComplex;
727
728 case GrShape::Type::kArc:
729 case GrShape::Type::kLine:
730 case GrShape::Type::kPoint:
731 // These types should never become RawElements
732 SkASSERT(false);
733 [[fallthrough]];
734
735 case GrShape::Type::kPath:
736 return ClipState::kComplex;
737 }
738 SkUNREACHABLE;
739}
740
741///////////////////////////////////////////////////////////////////////////////
742// GrClipStack::Mask
743
744GrClipStack::Mask::Mask(const SaveRecord& current, const SkIRect& drawBounds)
745 : fBounds(drawBounds)
746 , fGenID(current.genID()) {
747 static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
748
749 // The gen ID should not be invalid, empty, or wide open, since those do not require masks
750 SkASSERT(fGenID != kInvalidGenID && fGenID != kEmptyGenID && fGenID != kWideOpenGenID);
751
752 GrUniqueKey::Builder builder(&fKey, kDomain, 3, "clip_mask");
753 builder[0] = fGenID;
754 // SkToS16 because image filters outset layers to a size indicated by the filter, which can
755 // sometimes result in negative coordinates from device space.
756 builder[1] = SkToS16(drawBounds.fLeft) | (SkToS16(drawBounds.fRight) << 16);
757 builder[2] = SkToS16(drawBounds.fTop) | (SkToS16(drawBounds.fBottom) << 16);
758 SkASSERT(fKey.isValid());
759
760 SkDEBUGCODE(fOwner = &current;)
761}
762
763bool GrClipStack::Mask::appliesToDraw(const SaveRecord& current, const SkIRect& drawBounds) const {
764 // For the same save record, a larger mask will have the same or more elements
765 // baked into it, so it can be reused to clip the smaller draw.
766 SkASSERT(fGenID != current.genID() || &current == fOwner);
767 return fGenID == current.genID() && fBounds.contains(drawBounds);
768}
769
770void GrClipStack::Mask::invalidate(GrProxyProvider* proxyProvider) {
771 SkASSERT(proxyProvider);
772 SkASSERT(fKey.isValid()); // Should only be invalidated once
773 proxyProvider->processInvalidUniqueKey(
774 fKey, nullptr, GrProxyProvider::InvalidateGPUResource::kYes);
775 fKey.reset();
776}
777
778///////////////////////////////////////////////////////////////////////////////
779// GrClipStack::SaveRecord
780
781GrClipStack::SaveRecord::SaveRecord(const SkIRect& deviceBounds)
782 : fInnerBounds(deviceBounds)
783 , fOuterBounds(deviceBounds)
784 , fShader(nullptr)
785 , fStartingMaskIndex(0)
786 , fStartingElementIndex(0)
787 , fOldestValidIndex(0)
788 , fDeferredSaveCount(0)
789 , fStackOp(SkClipOp::kIntersect)
790 , fState(ClipState::kWideOpen)
791 , fGenID(kInvalidGenID) {}
792
793GrClipStack::SaveRecord::SaveRecord(const SaveRecord& prior,
794 int startingMaskIndex,
795 int startingElementIndex)
796 : fInnerBounds(prior.fInnerBounds)
797 , fOuterBounds(prior.fOuterBounds)
798 , fShader(prior.fShader)
799 , fStartingMaskIndex(startingMaskIndex)
800 , fStartingElementIndex(startingElementIndex)
801 , fOldestValidIndex(prior.fOldestValidIndex)
802 , fDeferredSaveCount(0)
803 , fStackOp(prior.fStackOp)
804 , fState(prior.fState)
805 , fGenID(kInvalidGenID) {
806 // If the prior record never needed a mask, this one will insert into the same index
807 // (that's okay since we'll remove it when this record is popped off the stack).
808 SkASSERT(startingMaskIndex >= prior.fStartingMaskIndex);
809 // The same goes for elements (the prior could have been wide open).
810 SkASSERT(startingElementIndex >= prior.fStartingElementIndex);
811}
812
813uint32_t GrClipStack::SaveRecord::genID() const {
814 if (fState == ClipState::kEmpty) {
815 return kEmptyGenID;
816 } else if (fState == ClipState::kWideOpen) {
817 return kWideOpenGenID;
818 } else {
819 // The gen ID shouldn't be empty or wide open, since they are reserved for the above
820 // if-cases. It may be kInvalid if the record hasn't had any elements added to it yet.
821 SkASSERT(fGenID != kEmptyGenID && fGenID != kWideOpenGenID);
822 return fGenID;
823 }
824}
825
826GrClipStack::ClipState GrClipStack::SaveRecord::state() const {
827 if (fShader && fState != ClipState::kEmpty) {
828 return ClipState::kComplex;
829 } else {
830 return fState;
831 }
832}
833
834bool GrClipStack::SaveRecord::contains(const GrClipStack::Draw& draw) const {
835 return fInnerBounds.contains(draw.outerBounds());
836}
837
838bool GrClipStack::SaveRecord::contains(const GrClipStack::RawElement& element) const {
839 return fInnerBounds.contains(element.outerBounds());
840}
841
842void GrClipStack::SaveRecord::removeElements(RawElement::Stack* elements) {
843 while (elements->count() > fStartingElementIndex) {
844 elements->pop_back();
845 }
846}
847
848void GrClipStack::SaveRecord::restoreElements(RawElement::Stack* elements) {
849 // Presumably this SaveRecord is the new top of the stack, and so it owns the elements
850 // from its starting index to restoreCount - 1. Elements from the old save record have
851 // been destroyed already, so their indices would have been >= restoreCount, and any
852 // still-present element can be un-invalidated based on that.
853 int i = elements->count() - 1;
854 for (RawElement& e : elements->ritems()) {
855 if (i < fOldestValidIndex) {
856 break;
857 }
858 e.restoreValid(*this);
859 --i;
860 }
861}
862
863void GrClipStack::SaveRecord::invalidateMasks(GrProxyProvider* proxyProvider,
864 Mask::Stack* masks) {
865 // Must explicitly invalidate the key before removing the mask object from the stack
866 while (masks->count() > fStartingMaskIndex) {
867 SkASSERT(masks->back().owner() == this && proxyProvider);
868 masks->back().invalidate(proxyProvider);
869 masks->pop_back();
870 }
871 SkASSERT(masks->empty() || masks->back().genID() != fGenID);
872}
873
874void GrClipStack::SaveRecord::reset(const SkIRect& bounds) {
875 SkASSERT(this->canBeUpdated());
876 fOldestValidIndex = fStartingElementIndex;
877 fOuterBounds = bounds;
878 fInnerBounds = bounds;
879 fStackOp = SkClipOp::kIntersect;
880 fState = ClipState::kWideOpen;
881 fShader = nullptr;
882}
883
884void GrClipStack::SaveRecord::addShader(sk_sp<SkShader> shader) {
885 SkASSERT(shader);
886 SkASSERT(this->canBeUpdated());
887 if (!fShader) {
888 fShader = std::move(shader);
889 } else {
890 // The total coverage is computed by multiplying the coverage from each element (shape or
891 // shader), but since multiplication is associative, we can use kSrcIn blending to make
892 // a new shader that represents 'shader' * 'fShader'
893 fShader = SkShaders::Blend(SkBlendMode::kSrcIn, std::move(shader), fShader);
894 }
895}
896
897bool GrClipStack::SaveRecord::addElement(RawElement&& toAdd, RawElement::Stack* elements) {
898 // Validity check the element's state first; if the shape class isn't empty, the outer bounds
899 // shouldn't be empty; if the inner bounds are not empty, they must be contained in outer.
900 SkASSERT((toAdd.shape().isEmpty() || !toAdd.outerBounds().isEmpty()) &&
901 (toAdd.innerBounds().isEmpty() || toAdd.outerBounds().contains(toAdd.innerBounds())));
902 // And we shouldn't be adding an element if we have a deferred save
903 SkASSERT(this->canBeUpdated());
904
905 if (fState == ClipState::kEmpty) {
906 // The clip is already empty, and we only shrink, so there's no need to record this element.
907 return false;
908 } else if (toAdd.shape().isEmpty()) {
909 // An empty difference op should have been detected earlier, since it's a no-op
910 SkASSERT(toAdd.op() == SkClipOp::kIntersect);
911 fState = ClipState::kEmpty;
912 return true;
913 }
914
915 // In this invocation, 'A' refers to the existing stack's bounds and 'B' refers to the new
916 // element.
917 switch (get_clip_geometry(*this, toAdd)) {
918 case ClipGeometry::kEmpty:
919 // The combination results in an empty clip
920 fState = ClipState::kEmpty;
921 return true;
922
923 case ClipGeometry::kAOnly:
924 // The combination would not be any different than the existing clip
925 return false;
926
927 case ClipGeometry::kBOnly:
928 // The combination would invalidate the entire existing stack and can be replaced with
929 // just the new element.
930 this->replaceWithElement(std::move(toAdd), elements);
931 return true;
932
933 case ClipGeometry::kBoth:
934 // The new element combines in a complex manner, so update the stack's bounds based on
935 // the combination of its and the new element's ops (handled below)
936 break;
937 }
938
939 if (fState == ClipState::kWideOpen) {
940 // When the stack was wide open and the clip effect was kBoth, the "complex" manner is
941 // simply to keep the element and update the stack bounds to be the element's intersected
942 // with the device.
943 this->replaceWithElement(std::move(toAdd), elements);
944 return true;
945 }
946
947 // Some form of actual clip element(s) to combine with.
948 if (fStackOp == SkClipOp::kIntersect) {
949 if (toAdd.op() == SkClipOp::kIntersect) {
950 // Intersect (stack) + Intersect (toAdd)
951 // - Bounds updates are simply the paired intersections of outer and inner.
952 SkAssertResult(fOuterBounds.intersect(toAdd.outerBounds()));
953 if (!fInnerBounds.intersect(toAdd.innerBounds())) {
954 // NOTE: this does the right thing if either rect is empty, since we set the
955 // inner bounds to empty here
956 fInnerBounds = SkIRect::MakeEmpty();
957 }
958 } else {
959 // Intersect (stack) + Difference (toAdd)
960 // - Shrink the stack's outer bounds if the difference op's inner bounds completely
961 // cuts off an edge.
962 // - Shrink the stack's inner bounds to completely exclude the op's outer bounds.
963 fOuterBounds = subtract(fOuterBounds, toAdd.innerBounds(), /* exact */ true);
964 fInnerBounds = subtract(fInnerBounds, toAdd.outerBounds(), /* exact */ false);
965 }
966 } else {
967 if (toAdd.op() == SkClipOp::kIntersect) {
968 // Difference (stack) + Intersect (toAdd)
969 // - Bounds updates are just the mirror of Intersect(stack) + Difference(toAdd)
970 SkIRect oldOuter = fOuterBounds;
971 fOuterBounds = subtract(toAdd.outerBounds(), fInnerBounds, /* exact */ true);
972 fInnerBounds = subtract(toAdd.innerBounds(), oldOuter, /* exact */ false);
973 } else {
974 // Difference (stack) + Difference (toAdd)
975 // - The updated outer bounds is the union of outer bounds and the inner becomes the
976 // largest of the two possible inner bounds
977 fOuterBounds.join(toAdd.outerBounds());
978 if (toAdd.innerBounds().width() * toAdd.innerBounds().height() >
979 fInnerBounds.width() * fInnerBounds.height()) {
980 fInnerBounds = toAdd.innerBounds();
981 }
982 }
983 }
984
985 // If we get here, we're keeping the new element and the stack's bounds have been updated.
986 // We ought to have caught the cases where the stack bounds resemble an empty or wide open
987 // clip, so assert that's the case.
988 SkASSERT(!fOuterBounds.isEmpty() &&
989 (fInnerBounds.isEmpty() || fOuterBounds.contains(fInnerBounds)));
990
991 return this->appendElement(std::move(toAdd), elements);
992}
993
994bool GrClipStack::SaveRecord::appendElement(RawElement&& toAdd, RawElement::Stack* elements) {
995 // Update past elements to account for the new element
996 int i = elements->count() - 1;
997
998 // After the loop, elements between [max(youngestValid, startingIndex)+1, count-1] can be
999 // removed from the stack (these are the active elements that have been invalidated by the
1000 // newest element; since it's the active part of the stack, no restore() can bring them back).
1001 int youngestValid = fStartingElementIndex - 1;
1002 // After the loop, elements between [0, oldestValid-1] are all invalid. The value of oldestValid
1003 // becomes the save record's new fOldestValidIndex value.
1004 int oldestValid = elements->count();
1005 // After the loop, this is the earliest active element that was invalidated. It may be
1006 // older in the stack than earliestValid, so cannot be popped off, but can be used to store
1007 // the new element instead of allocating more.
1008 RawElement* oldestActiveInvalid = nullptr;
1009 int oldestActiveInvalidIndex = elements->count();
1010
1011 for (RawElement& existing : elements->ritems()) {
1012 if (i < fOldestValidIndex) {
1013 break;
1014 }
1015 // We don't need to pass the actual index that toAdd will be saved to; just the minimum
1016 // index of this save record, since that will result in the same restoration behavior later.
1017 existing.updateForElement(&toAdd, *this);
1018
1019 if (toAdd.isInvalid()) {
1020 if (existing.isInvalid()) {
1021 // Both new and old invalid implies the entire clip becomes empty
1022 fState = ClipState::kEmpty;
1023 return true;
1024 } else {
1025 // The new element doesn't change the clip beyond what the old element already does
1026 return false;
1027 }
1028 } else if (existing.isInvalid()) {
1029 // The new element cancels out the old element. The new element may have been modified
1030 // to account for the old element's geometry.
1031 if (i >= fStartingElementIndex) {
1032 // Still active, so the invalidated index could be used to store the new element
1033 oldestActiveInvalid = &existing;
1034 oldestActiveInvalidIndex = i;
1035 }
1036 } else {
1037 // Keep both new and old elements
1038 oldestValid = i;
1039 if (i > youngestValid) {
1040 youngestValid = i;
1041 }
1042 }
1043
1044 --i;
1045 }
1046
1047 // Post-iteration validity check
1048 SkASSERT(oldestValid == elements->count() ||
1049 (oldestValid >= fOldestValidIndex && oldestValid < elements->count()));
1050 SkASSERT(youngestValid == fStartingElementIndex - 1 ||
1051 (youngestValid >= fStartingElementIndex && youngestValid < elements->count()));
1052 SkASSERT((oldestActiveInvalid && oldestActiveInvalidIndex >= fStartingElementIndex &&
1053 oldestActiveInvalidIndex < elements->count()) || !oldestActiveInvalid);
1054
1055 // Update final state
1056 SkASSERT(oldestValid >= fOldestValidIndex);
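    // A reused invalidated slot will hold the new element, so it must remain a valid index too.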
1057 fOldestValidIndex = std::min(oldestValid, oldestActiveInvalidIndex);
1058 fState = oldestValid == elements->count() ? toAdd.clipType() : ClipState::kComplex;
1059 if (fStackOp == SkClipOp::kDifference && toAdd.op() == SkClipOp::kIntersect) {
1060 // The stack remains in difference mode only as long as all elements are difference
1061 fStackOp = SkClipOp::kIntersect;
1062 }
1063
1064 int targetCount = youngestValid + 1;
1065 if (!oldestActiveInvalid || oldestActiveInvalidIndex >= targetCount) {
1066 // toAdd will be stored right after youngestValid
1067 targetCount++;
1068 oldestActiveInvalid = nullptr;
1069 }
1070 while (elements->count() > targetCount) {
1071 SkASSERT(oldestActiveInvalid != &elements->back()); // shouldn't delete what we'll reuse
1072 elements->pop_back();
1073 }
1074 if (oldestActiveInvalid) {
1075 *oldestActiveInvalid = std::move(toAdd);
1076 } else if (elements->count() < targetCount) {
1077 elements->push_back(std::move(toAdd));
1078 } else {
1079 elements->back() = std::move(toAdd);
1080 }
1081
1082 // Changing this will prompt GrClipStack to invalidate any masks associated with this record.
1083 fGenID = next_gen_id();
1084 return true;
1085}
1086
1087void GrClipStack::SaveRecord::replaceWithElement(RawElement&& toAdd, RawElement::Stack* elements) {
1088 // The aggregate state of the save record mirrors the element
1089 fInnerBounds = toAdd.innerBounds();
1090 fOuterBounds = toAdd.outerBounds();
1091 fStackOp = toAdd.op();
1092 fState = toAdd.clipType();
1093
1094 // All prior active elements can be removed from the stack: [startingIndex, count - 1]
1095 int targetCount = fStartingElementIndex + 1;
1096 while (elements->count() > targetCount) {
1097 elements->pop_back();
1098 }
1099 if (elements->count() < targetCount) {
1100 elements->push_back(std::move(toAdd));
1101 } else {
1102 elements->back() = std::move(toAdd);
1103 }
1104
1105 SkASSERT(elements->count() == fStartingElementIndex + 1);
1106
1107 // This invalidates all older elements that are owned by save records lower in the clip stack.
1108 fOldestValidIndex = fStartingElementIndex;
1109 fGenID = next_gen_id();
1110}
1111
1112///////////////////////////////////////////////////////////////////////////////
1113// GrClipStack
1114
1115// NOTE: Based on draw calls in all GMs, SKPs, and SVGs as of 08/20, 98% use a clip stack with
1116// one Element and up to two SaveRecords, thus the inline size for RawElement::Stack and
1117// SaveRecord::Stack (this conveniently keeps the size of GrClipStack manageable). The max
1118// encountered element stack depth was 5 and the max save depth was 6. Using an increment of 8 for
1119// these stacks means that clip management will incur a single allocation for the remaining 2%
1120// of the draws, with extra head room for more complex clips encountered in the wild.
1121//
1122// The mask stack increment size was chosen to be smaller since only 0.2% of the evaluated draw call
1123// set ever used a mask (which includes stencil masks), or up to 0.3% when CCPR is disabled.
1124static constexpr int kElementStackIncrement = 8;
1125static constexpr int kSaveStackIncrement = 8;
1126static constexpr int kMaskStackIncrement = 4;
1127
1128// And from this same draw call set, the most complex clip could only use 5 analytic coverage FPs.
1129// Historically we limited it to 4 based on Blink's call pattern, so we keep the limit as-is since
1130// it's so close to the empirically encountered max.
1131static constexpr int kMaxAnalyticFPs = 4;
1132// The number of stack-allocated mask pointers to store before extending the arrays.
1133// Stack size determined empirically, the maximum number of elements put in a SW mask was 4
1134// across our set of GMs, SKPs, and SVGs used for testing.
1135static constexpr int kNumStackMasks = 4;
1136
1137GrClipStack::GrClipStack(const SkIRect& deviceBounds, const SkMatrixProvider* matrixProvider,
1138 bool forceAA)
1139 : fElements(kElementStackIncrement)
1140 , fSaves(kSaveStackIncrement)
1141 , fMasks(kMaskStackIncrement)
1142 , fProxyProvider(nullptr)
1143 , fDeviceBounds(deviceBounds)
1144 , fMatrixProvider(matrixProvider)
1145 , fForceAA(forceAA) {
1146 // Start with a save record that is wide open
1147 fSaves.emplace_back(deviceBounds);
1148}
1149
1150GrClipStack::~GrClipStack() {
1151 // Invalidate all mask keys that remain. Since we're tearing the clip stack down, we don't need
1152 // to go through SaveRecord.
1153 SkASSERT(fProxyProvider || fMasks.empty());
1154 if (fProxyProvider) {
1155 for (Mask& m : fMasks.ritems()) {
1156 m.invalidate(fProxyProvider);
1157 }
1158 }
1159}
1160
1161void GrClipStack::save() {
1162 SkASSERT(!fSaves.empty());
1163 fSaves.back().pushSave();
1164}
1165
1166void GrClipStack::restore() {
1167 SkASSERT(!fSaves.empty());
1168 SaveRecord& current = fSaves.back();
1169 if (current.popSave()) {
1170 // This was just a deferred save being undone, so the record doesn't need to be removed yet
1171 return;
1172 }
1173
1174 // When we remove a save record, we delete all elements >= its starting index and any masks
1175 // that were rasterized for it.
1176 current.removeElements(&fElements);
1177 SkASSERT(fProxyProvider || fMasks.empty());
1178 if (fProxyProvider) {
1179 current.invalidateMasks(fProxyProvider, &fMasks);
1180 }
1181 fSaves.pop_back();
1182 // Restore any remaining elements that were only invalidated by the now-removed save record.
1183 fSaves.back().restoreElements(&fElements);
1184}
1185
1186SkIRect GrClipStack::getConservativeBounds() const {
1187 const SaveRecord& current = this->currentSaveRecord();
1188 if (current.state() == ClipState::kEmpty) {
1189 return SkIRect::MakeEmpty();
1190 } else if (current.state() == ClipState::kWideOpen) {
1191 return fDeviceBounds;
1192 } else {
1193 if (current.op() == SkClipOp::kDifference) {
1194 // The outer/inner bounds represent what's cut out, so full bounds remains the device
1195 // bounds, minus any fully clipped content that spans the device edge.
1196 return subtract(fDeviceBounds, current.innerBounds(), /* exact */ true);
1197 } else {
1198 SkASSERT(fDeviceBounds.contains(current.outerBounds()));
1199 return current.outerBounds();
1200 }
1201 }
1202}
1203
1204GrClip::PreClipResult GrClipStack::preApply(const SkRect& bounds, GrAA aa) const {
1205 Draw draw(bounds, fForceAA ? GrAA::kYes : aa);
1206 if (!draw.applyDeviceBounds(fDeviceBounds)) {
1207 return GrClip::Effect::kClippedOut;
1208 }
1209
1210 const SaveRecord& cs = this->currentSaveRecord();
1211 // Early out if we know a priori that the clip is full 0s or full 1s.
1212 if (cs.state() == ClipState::kEmpty) {
1213 return GrClip::Effect::kClippedOut;
1214 } else if (cs.state() == ClipState::kWideOpen) {
1215 SkASSERT(!cs.shader());
1216 return GrClip::Effect::kUnclipped;
1217 }
1218
1219 // Given argument order, 'A' == current clip, 'B' == draw
1220 switch (get_clip_geometry(cs, draw)) {
1221 case ClipGeometry::kEmpty:
1222 // Can ignore the shader since the geometry removed everything already
1223 return GrClip::Effect::kClippedOut;
1224
1225 case ClipGeometry::kBOnly:
1226 // Geometrically, the draw is unclipped, but can't ignore a shader
1227 return cs.shader() ? GrClip::Effect::kClipped : GrClip::Effect::kUnclipped;
1228
1229 case ClipGeometry::kAOnly:
1230 // Shouldn't happen since the inner bounds of a draw are unknown
1231 SkASSERT(false);
1232 // But if it did, it technically means the draw covered the clip and should be
1233 // considered kClipped or similar, which is what the next case handles.
1234 [[fallthrough]];
1235
1236 case ClipGeometry::kBoth: {
1237 SkASSERT(fElements.count() > 0);
1238 const RawElement& back = fElements.back();
1239 if (cs.state() == ClipState::kDeviceRect) {
1240 SkASSERT(back.clipType() == ClipState::kDeviceRect);
1241 return {back.shape().rect(), back.aa()};
1242 } else if (cs.state() == ClipState::kDeviceRRect) {
1243 SkASSERT(back.clipType() == ClipState::kDeviceRRect);
1244 return {back.shape().rrect(), back.aa()};
1245 } else {
1246 // The clip stack has complex shapes, multiple elements, or a shader; we could
1247 // iterate per element like we would in apply(), but preApply() is meant to be
1248 // conservative and efficient.
1249 SkASSERT(cs.state() == ClipState::kComplex);
1250 return GrClip::Effect::kClipped;
1251 }
1252 }
1253 }
1254
1255 SkUNREACHABLE;
1256}
1257
1258GrClip::Effect GrClipStack::apply(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
1259 GrAAType aa, bool hasUserStencilSettings,
1260 GrAppliedClip* out, SkRect* bounds) const {
1261 // TODO: Once we no longer store SW masks, we don't need to sneak the provider in like this
1262 if (!fProxyProvider) {
1263 fProxyProvider = context->priv().proxyProvider();
1264 }
1265 SkASSERT(fProxyProvider == context->priv().proxyProvider());
1266 const GrCaps* caps = context->priv().caps();
1267
1268 // Convert the bounds to a Draw and apply device bounds clipping, making our query as tight
1269 // as possible.
1270 Draw draw(*bounds, GrAA(fForceAA || aa != GrAAType::kNone));
1271 if (!draw.applyDeviceBounds(fDeviceBounds)) {
1272 return Effect::kClippedOut;
1273 }
1274 SkAssertResult(bounds->intersect(SkRect::Make(fDeviceBounds)));
1275
1276 const SaveRecord& cs = this->currentSaveRecord();
1277 // Early out if we know a priori that the clip is full 0s or full 1s.
1278 if (cs.state() == ClipState::kEmpty) {
1279 return Effect::kClippedOut;
1280 } else if (cs.state() == ClipState::kWideOpen) {
1281 SkASSERT(!cs.shader());
1282 return Effect::kUnclipped;
1283 }
1284
1285 // Convert any clip shader first, since it's not geometrically related to the draw bounds
1286 std::unique_ptr<GrFragmentProcessor> clipFP = nullptr;
1287 if (cs.shader()) {
1288 static const GrColorInfo kCoverageColorInfo{GrColorType::kUnknown, kPremul_SkAlphaType,
1289 nullptr};
1290 GrFPArgs args(context, *fMatrixProvider, SkSamplingOptions(), &kCoverageColorInfo);
1291 clipFP = as_SB(cs.shader())->asFragmentProcessor(args);
1292 if (clipFP) {
1293 // The initial input is the coverage from the geometry processor, so this ensures it
1294 // is multiplied properly with the alpha of the clip shader.
1295 clipFP = GrFragmentProcessor::MulInputByChildAlpha(std::move(clipFP));
1296 }
1297 }
1298
1299 // A refers to the entire clip stack, B refers to the draw
1300 switch (get_clip_geometry(cs, draw)) {
1301 case ClipGeometry::kEmpty:
1302 return Effect::kClippedOut;
1303
1304 case ClipGeometry::kBOnly:
1305 // Geometrically unclipped, but may need to add the shader as a coverage FP
1306 if (clipFP) {
1307 out->addCoverageFP(std::move(clipFP));
1308 return Effect::kClipped;
1309 } else {
1310 return Effect::kUnclipped;
1311 }
1312
1313 case ClipGeometry::kAOnly:
1314 // Shouldn't happen since draws don't report inner bounds
1315 SkASSERT(false);
1316 [[fallthrough]];
1317
1318 case ClipGeometry::kBoth:
1319 // The draw is combined with the saved clip elements; the below logic tries to skip
1320 // as many elements as possible.
1321 SkASSERT(cs.state() == ClipState::kDeviceRect ||
1322 cs.state() == ClipState::kDeviceRRect ||
1323 cs.state() == ClipState::kComplex);
1324 break;
1325 }
1326
1327 // We can determine a scissor based on the draw and the overall stack bounds.
1328 SkIRect scissorBounds;
1329 if (cs.op() == SkClipOp::kIntersect) {
1330 // Initially we keep this as large as possible; if the clip is applied solely with coverage
1331 // FPs then using a loose scissor increases the chance we can batch the draws.
1332 // We tighten it later if any form of mask or atlas element is needed.
1333 scissorBounds = cs.outerBounds();
1334 } else {
1335 scissorBounds = subtract(draw.outerBounds(), cs.innerBounds(), /* exact */ true);
1336 }
1337
1338 // We mark this true once we have a coverage FP (since complex clipping is occurring), or we
1339 // have an element that wouldn't affect the scissored draw bounds, but does affect the regular
1340 // draw bounds. In that case, the scissor is sufficient for clipping and we can skip the
1341 // element but definitely cannot then drop the scissor.
1342 bool scissorIsNeeded = SkToBool(cs.shader());
1343
1344 int remainingAnalyticFPs = kMaxAnalyticFPs;
1345 if (hasUserStencilSettings) {
1346 // Disable analytic clips when there are user stencil settings to ensure the clip is
1347 // respected in the stencil buffer.
1348 remainingAnalyticFPs = 0;
1349 // If we have user stencil settings, we shouldn't be avoiding the stencil buffer anyways.
1350 SkASSERT(!context->priv().caps()->avoidStencilBuffers());
1351 }
1352
1353 // If window rectangles are supported, we can use them to exclude inner bounds of difference ops
Brian Salomon70fe17e2020-11-30 14:33:58 -05001354 int maxWindowRectangles = rtc->maxWindowRectangles();
Michael Ludwiga195d102020-09-15 14:51:52 -04001355 GrWindowRectangles windowRects;
1356
1357 // Elements not represented as an analytic FP or skipped will be collected here and later
1358 // applied by using the stencil buffer, CCPR clip atlas, or a cached SW mask.
1359 SkSTArray<kNumStackMasks, const Element*> elementsForMask;
1360 SkSTArray<kNumStackMasks, const RawElement*> elementsForAtlas;
1361
1362 bool maskRequiresAA = false;
1363 auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();
1364
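    // Walk the elements from newest to oldest; 'i' tracks each element's index so the loop can
    // stop once it moves past the save record's oldest still-relevant element.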
    int i = fElements.count();
    for (const RawElement& e : fElements.ritems()) {
        --i;
        if (i < cs.oldestElementIndex()) {
            // All earlier elements have been invalidated by elements already processed
            break;
        } else if (e.isInvalid()) {
            continue;
        }

        switch (get_clip_geometry(e, draw)) {
            case ClipGeometry::kEmpty:
                // This can happen for difference op elements that have a larger fInnerBounds than
                // can be preserved at the next level.
                return Effect::kClippedOut;

            case ClipGeometry::kBOnly:
                // We don't need to produce a coverage FP or mask for the element
                break;

            case ClipGeometry::kAOnly:
                // Shouldn't happen for draws, fall through to regular element processing
                SkASSERT(false);
                [[fallthrough]];

            case ClipGeometry::kBoth: {
                // The element must apply coverage to the draw, so enable the scissor to limit
                // overdraw.
                scissorIsNeeded = true;

                // First apply using HW methods (scissor and window rects). When the inner and
                // outer bounds match, nothing else needs to be done.
                bool fullyApplied = false;
                if (e.op() == SkClipOp::kIntersect) {
                    // The second test allows clipped draws that are scissored by multiple elements
                    // to remain scissor-only.
                    fullyApplied = e.innerBounds() == e.outerBounds() ||
                                   e.innerBounds().contains(scissorBounds);
                } else {
                    if (!e.innerBounds().isEmpty() && windowRects.count() < maxWindowRectangles) {
                        // TODO: If we have more difference ops than available window rects, we
                        // should prioritize those with the largest inner bounds.
                        windowRects.addWindow(e.innerBounds());
                        fullyApplied = e.innerBounds() == e.outerBounds();
                    }
                }

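                // HW clipping alone wasn't enough, so next try an analytic coverage FP, and then
                // the CCPR atlas for AA elements; anything left over falls through to a mask.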
                if (!fullyApplied && remainingAnalyticFPs > 0) {
                    std::tie(fullyApplied, clipFP) = analytic_clip_fp(e.asElement(),
                                                                      *caps->shaderCaps(),
                                                                      std::move(clipFP));
                    if (fullyApplied) {
                        remainingAnalyticFPs--;
                    } else if (ccpr && e.aa() == GrAA::kYes) {
                        // While technically the element is turned into a mask, each atlas entry
                        // counts towards the FP complexity of the clip.
                        // TODO: CCPR needs a stable ops task ID, so we can't create FPs until we
                        // know any other mask generation is finished. It also only works with AA
                        // shapes; future atlas systems can improve on this.
                        elementsForAtlas.push_back(&e);
                        remainingAnalyticFPs--;
                        fullyApplied = true;
                    }
                }

                if (!fullyApplied) {
                    elementsForMask.push_back(&e.asElement());
                    maskRequiresAA |= (e.aa() == GrAA::kYes);
                }

                break;
            }
        }
    }

    if (!scissorIsNeeded) {
        // More detailed analysis of the element shapes determined no clip is needed
        SkASSERT(elementsForMask.empty() && elementsForAtlas.empty() && !clipFP);
        return Effect::kUnclipped;
    }

    // Fill out the GrAppliedClip with what we know so far, possibly with a tightened scissor
    if (cs.op() == SkClipOp::kIntersect &&
        (!elementsForMask.empty() || !elementsForAtlas.empty())) {
        SkAssertResult(scissorBounds.intersect(draw.outerBounds()));
    }
    if (!GrClip::IsInsideClip(scissorBounds, *bounds)) {
        out->hardClip().addScissor(scissorBounds, bounds);
    }
    if (!windowRects.empty()) {
        out->hardClip().addWindowRectangles(windowRects, GrWindowRectsState::Mode::kExclusive);
    }

    // Now rasterize any remaining elements, either to the stencil or a SW mask. All elements are
    // flattened into a single mask.
    if (!elementsForMask.empty()) {
        bool stencilUnavailable = context->priv().caps()->avoidStencilBuffers() ||
                                  rtc->wrapsVkSecondaryCB();

        bool hasSWMask = false;
        if ((rtc->numSamples() <= 1 && maskRequiresAA) || stencilUnavailable) {
            // Must use a texture mask to represent the combined clip elements since the stencil
            // cannot be used, or cannot handle smooth clips.
            std::tie(hasSWMask, clipFP) = GetSWMaskFP(
                    context, &fMasks, cs, scissorBounds, elementsForMask.begin(),
                    elementsForMask.count(), std::move(clipFP));
        }

        if (!hasSWMask) {
            if (stencilUnavailable) {
                SkDebugf("WARNING: Clip mask requires stencil, but stencil unavailable. "
                         "Draw will be ignored.\n");
                return Effect::kClippedOut;
            } else {
                // Rasterize the remaining elements to the stencil buffer
                render_stencil_mask(context, rtc, cs.genID(), scissorBounds,
                                    elementsForMask.begin(), elementsForMask.count(), out);
            }
        }
    }

    // Finish CCPR paths now that the render target's ops task is stable.
    if (!elementsForAtlas.empty()) {
        uint32_t opsTaskID = rtc->getOpsTask()->uniqueID();
        for (int i = 0; i < elementsForAtlas.count(); ++i) {
            SkASSERT(elementsForAtlas[i]->aa() == GrAA::kYes);
            clipFP = clip_atlas_fp(ccpr, opsTaskID, scissorBounds, elementsForAtlas[i]->asElement(),
                                   elementsForAtlas[i]->devicePath(), *caps, std::move(clipFP));
        }
    }

    if (clipFP) {
        // This will include all analytic FPs, all CCPR atlas FPs, and a SW mask FP.
        out->addCoverageFP(std::move(clipFP));
    }

    SkASSERT(out->doesClip());
    return Effect::kClipped;
}

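// Returns the current save record if it can still be modified in place; otherwise the deferred
// save is materialized into a new record. '*wasDeferred' reports which case occurred.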
GrClipStack::SaveRecord& GrClipStack::writableSaveRecord(bool* wasDeferred) {
    SaveRecord& current = fSaves.back();
    if (current.canBeUpdated()) {
        // Current record is still open, so it can be modified directly
        *wasDeferred = false;
        return current;
    } else {
        // Must undefer the save to get a new record.
        SkAssertResult(current.popSave());
        *wasDeferred = true;
        return fSaves.emplace_back(current, fMasks.count(), fElements.count());
    }
}

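// A clip shader only modulates coverage (it becomes a fragment processor in apply()); it has no
// geometric effect, so existing masks and elements remain valid.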
void GrClipStack::clipShader(sk_sp<SkShader> shader) {
    // Shaders can't bring additional coverage
    if (this->currentSaveRecord().state() == ClipState::kEmpty) {
        return;
    }

    bool wasDeferred;
    this->writableSaveRecord(&wasDeferred).addShader(std::move(shader));
    // Masks and geometry elements are not invalidated by updating the clip shader
}

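// Resets the current save record to the device bounds and, if 'rect' differs from them, re-applies
// it as a non-AA intersect clip. Prior elements and masks are only dropped when the record was
// already writable (not a deferred save).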
void GrClipStack::replaceClip(const SkIRect& rect) {
    bool wasDeferred;
    SaveRecord& save = this->writableSaveRecord(&wasDeferred);

    if (!wasDeferred) {
        save.removeElements(&fElements);
        save.invalidateMasks(fProxyProvider, &fMasks);
    }

    save.reset(fDeviceBounds);
    if (rect != fDeviceBounds) {
        this->clipRect(SkMatrix::I(), SkRect::Make(rect), GrAA::kNo, SkClipOp::kIntersect);
    }
}

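// Folds a simplified element into the current save record. If addElement() ends up recording
// nothing and a new record had been created for it, that empty record is popped again.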
void GrClipStack::clip(RawElement&& element) {
    if (this->currentSaveRecord().state() == ClipState::kEmpty) {
        return;
    }

    // Reduce the shape to anything simpler; this applies the transform if it's a scale+translate
    // and ensures the element's bounds are clipped to the device bounds (NOT the conservative clip
    // bounds, since those are based on the net effect of all elements, while device-bounds
    // clipping happens implicitly). During addElement, we may still be able to invalidate some
    // older elements.
    element.simplify(fDeviceBounds, fForceAA);
    SkASSERT(!element.shape().inverted());

    // An empty shape means do nothing (for difference) or close the save record, so we try to
    // detect that early, before doing additional, unnecessary save record allocation.
    if (element.shape().isEmpty()) {
        if (element.op() == SkClipOp::kDifference) {
            // If the shape is empty and we're subtracting, this has no effect on the clip
            return;
        }
        // else we will make the clip empty, but we need a new save record to record that change
        // in the clip state; fall through to below and updateForElement() will handle it.
    }

    bool wasDeferred;
    SaveRecord& save = this->writableSaveRecord(&wasDeferred);
    SkDEBUGCODE(uint32_t oldGenID = save.genID();)
    SkDEBUGCODE(int elementCount = fElements.count();)
    if (!save.addElement(std::move(element), &fElements)) {
        if (wasDeferred) {
            // We made a new save record, but ended up not adding an element to the stack.
            // So instead of keeping an empty save record around, pop it off and restore the counter
            SkASSERT(elementCount == fElements.count());
            fSaves.pop_back();
            fSaves.back().pushSave();
        } else {
            // Should not have changed gen ID if the element and save were not modified
            SkASSERT(oldGenID == save.genID());
        }
    } else {
        // The gen ID should be new, and should not be invalid
        SkASSERT(oldGenID != save.genID() && save.genID() != kInvalidGenID);
        if (fProxyProvider && !wasDeferred) {
            // We modified an active save record so any old masks it had can be invalidated
            save.invalidateMasks(fProxyProvider, &fMasks);
        }
    }
}

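// Returns an FP that multiplies the incoming coverage by a software-rasterized mask of the given
// elements, reusing a cached mask from this save record when one already covers 'bounds'.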
GrFPResult GrClipStack::GetSWMaskFP(GrRecordingContext* context, Mask::Stack* masks,
                                    const SaveRecord& current, const SkIRect& bounds,
                                    const Element** elements, int count,
                                    std::unique_ptr<GrFragmentProcessor> clipFP) {
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    GrSurfaceProxyView maskProxy;

    SkIRect maskBounds; // may not be 'bounds' if we reuse a large clip mask
    // Check the existing masks from this save record for compatibility
    for (const Mask& m : masks->ritems()) {
        if (m.genID() != current.genID()) {
            break;
        }
        if (m.appliesToDraw(current, bounds)) {
            maskProxy = proxyProvider->findCachedProxyWithColorTypeFallback(
                    m.key(), kMaskOrigin, GrColorType::kAlpha_8, 1);
            if (maskProxy) {
                maskBounds = m.bounds();
                break;
            }
        }
    }

    if (!maskProxy) {
        // No existing mask was found, so need to render a new one
        maskProxy = render_sw_mask(context, bounds, elements, count);
        if (!maskProxy) {
            // If we still don't have one, there's nothing we can do
            return GrFPFailure(std::move(clipFP));
        }

        // Register the mask for later invalidation
        Mask& mask = masks->emplace_back(current, bounds);
        proxyProvider->assignUniqueKeyToProxy(mask.key(), maskProxy.asTextureProxy());
        maskBounds = bounds;
    }

    // Wrap the mask in an FP that samples it for coverage
    SkASSERT(maskProxy && maskProxy.origin() == kMaskOrigin);

    GrSamplerState samplerState(GrSamplerState::WrapMode::kClampToBorder,
                                GrSamplerState::Filter::kNearest);
    // Map the device coords passed to the texture effect to the top-left corner of the mask, and
    // make sure that the draw bounds are pre-mapped into the mask's space as well.
    auto m = SkMatrix::Translate(-maskBounds.fLeft, -maskBounds.fTop);
    auto subset = SkRect::Make(bounds);
    subset.offset(-maskBounds.fLeft, -maskBounds.fTop);
    // We scissor to bounds. The mask's texel centers are aligned to device space
    // pixel centers. Hence this domain of texture coordinates.
    auto domain = subset.makeInset(0.5, 0.5);
    auto fp = GrTextureEffect::MakeSubset(std::move(maskProxy), kPremul_SkAlphaType, m,
                                          samplerState, subset, domain, *context->priv().caps());
    fp = GrDeviceSpaceEffect::Make(std::move(fp));

    // Must combine the coverage sampled from the texture effect with the previous coverage
    fp = GrBlendFragmentProcessor::Make(std::move(fp), std::move(clipFP), SkBlendMode::kDstIn);
    return GrFPSuccess(std::move(fp));
}