blob: 4058395012d67ba6a00f2656125922b5c918dafa [file] [log] [blame]
Michael Ludwiga195d102020-09-15 14:51:52 -04001/*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/gpu/GrClipStack.h"
9
10#include "include/core/SkMatrix.h"
Chris Dalton7d592cd2021-03-11 22:49:33 -070011#include "src/core/SkPathPriv.h"
Michael Ludwiga195d102020-09-15 14:51:52 -040012#include "src/core/SkRRectPriv.h"
13#include "src/core/SkRectPriv.h"
14#include "src/core/SkTaskGroup.h"
15#include "src/gpu/GrClip.h"
Adlai Holler9e2c50e2021-02-09 14:41:52 -050016#include "src/gpu/GrDeferredProxyUploader.h"
Adlai Hollera0693042020-10-14 11:23:11 -040017#include "src/gpu/GrDirectContextPriv.h"
Michael Ludwiga195d102020-09-15 14:51:52 -040018#include "src/gpu/GrProxyProvider.h"
19#include "src/gpu/GrRecordingContextPriv.h"
Michael Ludwiga195d102020-09-15 14:51:52 -040020#include "src/gpu/GrSWMaskHelper.h"
21#include "src/gpu/GrStencilMaskHelper.h"
22#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
23#include "src/gpu/effects/GrBlendFragmentProcessor.h"
24#include "src/gpu/effects/GrConvexPolyEffect.h"
25#include "src/gpu/effects/GrRRectEffect.h"
26#include "src/gpu/effects/GrTextureEffect.h"
27#include "src/gpu/effects/generated/GrAARectEffect.h"
28#include "src/gpu/effects/generated/GrDeviceSpaceEffect.h"
29#include "src/gpu/geometry/GrQuadUtils.h"
30
31namespace {
32
// This captures which of the two elements in (A op B) would be required when they are combined,
// where op is intersect or difference.
enum class ClipGeometry {
    kEmpty,   // The combination has no coverage; the clip eliminates everything
    kAOnly,   // Only A is needed to represent the combined effect
    kBOnly,   // Only B is needed to represent the combined effect
    kBoth     // Neither element can be discarded; both contribute to the result
};
41
// A and B can be Element, SaveRecord, or Draw. Supported combinations are, order not mattering,
// (Element, Element), (Element, SaveRecord), (Element, Draw), and (SaveRecord, Draw). Only the
// common duck-typed interface is used: op(), outerBounds(), and contains().
template<typename A, typename B>
static ClipGeometry get_clip_geometry(const A& a, const B& b) {
    // NOTE: SkIRect::Intersects() returns false when two rectangles touch at an edge (so the result
    // is empty). This behavior is desired for the following clip effect policies.
    if (a.op() == SkClipOp::kIntersect) {
        if (b.op() == SkClipOp::kIntersect) {
            // Intersect (A) + Intersect (B)
            if (!SkIRect::Intersects(a.outerBounds(), b.outerBounds())) {
                // Regions with non-zero coverage are disjoint, so intersection = empty
                return ClipGeometry::kEmpty;
            } else if (b.contains(a)) {
                // B's full coverage region contains entirety of A, so intersection = A
                return ClipGeometry::kAOnly;
            } else if (a.contains(b)) {
                // A's full coverage region contains entirety of B, so intersection = B
                return ClipGeometry::kBOnly;
            } else {
                // The shapes intersect in some non-trivial manner
                return ClipGeometry::kBoth;
            }
        } else {
            SkASSERT(b.op() == SkClipOp::kDifference);
            // Intersect (A) + Difference (B)
            if (!SkIRect::Intersects(a.outerBounds(), b.outerBounds())) {
                // A only intersects B's full coverage region, so intersection = A
                return ClipGeometry::kAOnly;
            } else if (b.contains(a)) {
                // B's zero coverage region completely contains A, so intersection = empty
                return ClipGeometry::kEmpty;
            } else {
                // Intersection cannot be simplified. Note that the combination of a intersect
                // and difference op in this order cannot produce kBOnly
                return ClipGeometry::kBoth;
            }
        }
    } else {
        SkASSERT(a.op() == SkClipOp::kDifference);
        if (b.op() == SkClipOp::kIntersect) {
            // Difference (A) + Intersect (B) - the mirror of Intersect(A) + Difference(B),
            // but combining is commutative so this is equivalent barring naming.
            if (!SkIRect::Intersects(b.outerBounds(), a.outerBounds())) {
                // B only intersects A's full coverage region, so intersection = B
                return ClipGeometry::kBOnly;
            } else if (a.contains(b)) {
                // A's zero coverage region completely contains B, so intersection = empty
                return ClipGeometry::kEmpty;
            } else {
                // Cannot be simplified
                return ClipGeometry::kBoth;
            }
        } else {
            SkASSERT(b.op() == SkClipOp::kDifference);
            // Difference (A) + Difference (B)
            if (a.contains(b)) {
                // A's zero coverage region contains B, so B doesn't remove any extra
                // coverage from their intersection.
                return ClipGeometry::kAOnly;
            } else if (b.contains(a)) {
                // Mirror of the above case, intersection = B instead
                return ClipGeometry::kBOnly;
            } else {
                // Intersection of the two differences cannot be simplified. Note that for
                // this op combination it is not possible to produce kEmpty.
                return ClipGeometry::kBoth;
            }
        }
    }
}
112
// a.contains(b) where a's local space is defined by 'aToDevice', and b's possibly separate local
// space is defined by 'bToDevice'. 'a' and 'b' geometry are provided in their local spaces.
// Automatically takes into account if the anti-aliasing policies differ. When the policies match,
// we assume that coverage AA or GPU's non-AA rasterization will apply to A and B equivalently, so
// we can compare the original shapes. When the modes are mixed, we outset B in device space first.
// Returns false conservatively whenever containment cannot be proven cheaply (e.g. concave A).
static bool shape_contains_rect(
        const GrShape& a, const SkMatrix& aToDevice, const SkMatrix& deviceToA,
        const SkRect& b, const SkMatrix& bToDevice, bool mixedAAMode) {
    // Only convex shapes support the corner-containment proof used below.
    if (!a.convex()) {
        return false;
    }

    if (!mixedAAMode && aToDevice == bToDevice) {
        // A and B are in the same coordinate space, so don't bother mapping
        return a.conservativeContains(b);
    } else if (bToDevice.isIdentity() && aToDevice.preservesAxisAlignment()) {
        // Optimize the common case of draws (B, with identity matrix) and axis-aligned shapes,
        // instead of checking the four corners separately.
        SkRect bInA = b;
        if (mixedAAMode) {
            // Half-pixel outset guards against AA/non-AA rasterization snapping differences.
            bInA.outset(0.5f, 0.5f);
        }
        SkAssertResult(deviceToA.mapRect(&bInA));
        return a.conservativeContains(bInA);
    }

    // Test each corner for contains; since a is convex, if all 4 corners of b's bounds are
    // contained, then the entirety of b is within a.
    GrQuad deviceQuad = GrQuad::MakeFromRect(b, bToDevice);
    if (any(deviceQuad.w4f() < SkPathPriv::kW0PlaneDistance)) {
        // Something in B actually projects behind the W = 0 plane and would be clipped to infinity,
        // so it's extremely unlikely that A can contain B.
        return false;
    }
    if (mixedAAMode) {
        // Outset it so its edges are 1/2px out, giving us a buffer to avoid cases where a non-AA
        // clip or draw would snap outside an aa element.
        GrQuadUtils::Outset({0.5f, 0.5f, 0.5f, 0.5f}, &deviceQuad);
    }

    for (int i = 0; i < 4; ++i) {
        SkPoint cornerInA = deviceQuad.point(i);
        deviceToA.mapPoints(&cornerInA, 1);
        if (!a.conservativeContains(cornerInA)) {
            return false;
        }
    }

    return true;
}
163
164static SkIRect subtract(const SkIRect& a, const SkIRect& b, bool exact) {
165 SkIRect diff;
166 if (SkRectPriv::Subtract(a, b, &diff) || !exact) {
167 // Either A-B is exactly the rectangle stored in diff, or we don't need an exact answer
168 // and can settle for the subrect of A excluded from B (which is also 'diff')
169 return diff;
170 } else {
171 // For our purposes, we want the original A when A-B cannot be exactly represented
172 return a;
173 }
174}
175
176static GrClipEdgeType get_clip_edge_type(SkClipOp op, GrAA aa) {
177 if (op == SkClipOp::kIntersect) {
178 return aa == GrAA::kYes ? GrClipEdgeType::kFillAA : GrClipEdgeType::kFillBW;
179 } else {
180 return aa == GrAA::kYes ? GrClipEdgeType::kInverseFillAA : GrClipEdgeType::kInverseFillBW;
181 }
182}
183
// Reserved clip-stack generation IDs; see next_gen_id() which hands out IDs starting at 3.
// These are logically immutable constants but were declared as mutable globals; constexpr
// prevents accidental mutation and allows compile-time folding.
static constexpr uint32_t kInvalidGenID  = 0;
static constexpr uint32_t kEmptyGenID    = 1;
static constexpr uint32_t kWideOpenGenID = 2;
187
// Produces a process-unique generation ID for clip stack states. Thread-safe via a relaxed
// atomic counter; values 0-2 are reserved (invalid, empty, wide-open) and are skipped if the
// 32-bit counter ever wraps around.
static uint32_t next_gen_id() {
    // 0-2 are reserved for invalid, empty & wide-open
    static const uint32_t kFirstUnreservedGenID = 3;
    static std::atomic<uint32_t> nextID{kFirstUnreservedGenID};

    uint32_t id = nextID.fetch_add(1, std::memory_order_relaxed);
    while (id < kFirstUnreservedGenID) {
        // Counter wrapped into the reserved range; keep advancing until it's valid again
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    }
    return id;
}
199
200// Functions for rendering / applying clip shapes in various ways
201// The general strategy is:
202// - Represent the clip element as an analytic FP that tests sk_FragCoord vs. its device shape
203// - Render the clip element to the stencil, if stencil is allowed and supports the AA, and the
204// size of the element indicates stenciling will be worth it, vs. making a mask.
205// - Try to put the individual element into a clip atlas, which is then sampled during the draw
206// - Render the element into a SW mask and upload it. If possible, the SW rasterization happens
207// in parallel.
// All clip masks are rendered/uploaded with a top-left origin so mask texel coords line up with
// device-space pixel coords.
static constexpr GrSurfaceOrigin kMaskOrigin = kTopLeft_GrSurfaceOrigin;
209
// Attempts to express a single clip element as an analytic fragment processor chained onto 'fp'.
// Succeeds for device-space rects/rrects and for convex line-segment polygons (transformed into
// device space); returns GrFPFailure with the unmodified input fp when the element's shape cannot
// be handled analytically.
static GrFPResult analytic_clip_fp(const GrClipStack::Element& e,
                                   const GrShaderCaps& caps,
                                   std::unique_ptr<GrFragmentProcessor> fp) {
    // All analytic clip shape FPs need to be in device space
    GrClipEdgeType edgeType = get_clip_edge_type(e.fOp, e.fAA);
    if (e.fLocalToDevice.isIdentity()) {
        if (e.fShape.isRect()) {
            return GrFPSuccess(GrAARectEffect::Make(std::move(fp), edgeType, e.fShape.rect()));
        } else if (e.fShape.isRRect()) {
            // GrRRectEffect::Make may itself fail for complex rrects; pass through its result
            return GrRRectEffect::Make(std::move(fp), edgeType, e.fShape.rrect(), caps);
        }
    }

    // A convex hull can be transformed into device space (this will handle rect shapes with a
    // non-identity transform).
    if (e.fShape.segmentMask() == SkPath::kLine_SegmentMask && e.fShape.convex()) {
        SkPath devicePath;
        e.fShape.asPath(&devicePath);
        devicePath.transform(e.fLocalToDevice);
        return GrConvexPolyEffect::Make(std::move(fp), edgeType, devicePath);
    }

    return GrFPFailure(std::move(fp));
}
234
// TODO: Currently this only works with CCPR because CCPR owns and manages the clip atlas. The
// high-level concept should be generalized to support any path renderer going into a shared atlas.
//
// Builds a fragment processor that samples the element's coverage from the CCPR clip atlas,
// lazily filling in 'devicePath' (device-space) from the element on first use so callers can
// share the transformed path across attempts.
static GrFPResult clip_atlas_fp(GrCoverageCountingPathRenderer* ccpr,
                                uint32_t opsTaskID,
                                const SkIRect& bounds,
                                const GrClipStack::Element& e,
                                SkPath* devicePath,
                                const GrCaps& caps,
                                std::unique_ptr<GrFragmentProcessor> fp) {
    // TODO: Currently the atlas manages device-space paths, so we have to transform by the ctm.
    // In the future, the atlas manager should see the local path and the ctm so that it can
    // cache across integer-only translations (internally, it already does this, just not exposed).
    if (devicePath->isEmpty()) {
        e.fShape.asPath(devicePath);
        devicePath->transform(e.fLocalToDevice);
        SkASSERT(!devicePath->isEmpty());
    }

    // The atlas only handles non-inverse fills; difference ops are emulated below via blending.
    SkASSERT(!devicePath->isInverseFillType());
    if (e.fOp == SkClipOp::kIntersect) {
        return ccpr->makeClipProcessor(std::move(fp), opsTaskID, *devicePath, bounds, caps);
    } else {
        // Use kDstOut to convert the non-inverted mask alpha into (1-alpha), so the atlas only
        // ever renders non-inverse filled paths.
        // - When the input FP is null, this turns into "(1-sample(ccpr, 1).a) * input"
        // - When not null, it works out to
        //       (1-sample(ccpr, input.rgb1).a) * sample(fp, input.rgb1) * input.a
        // - Since clips only care about the alpha channel, these are both equivalent to the
        //   desired product of (1-ccpr) * fp * input.a.
        auto [success, atlasFP] = ccpr->makeClipProcessor(nullptr, opsTaskID, *devicePath, bounds,
                                                          caps);
        if (!success) {
            // "Difference" draws that don't intersect the clip need to be drawn "wide open".
            return GrFPSuccess(nullptr);
        }
        return GrFPSuccess(GrBlendFragmentProcessor::Make(std::move(atlasFP), // src
                                                          std::move(fp),      // dst
                                                          SkBlendMode::kDstOut));
    }
}
275
// Rasterizes one clip element into the software coverage mask being accumulated by 'helper'.
// 'clearMask' must be true only for the first element, which also determines the initial clear
// color of the mask. The alpha/invert choices below are coupled to that clear policy, so element
// order matters to callers.
static void draw_to_sw_mask(GrSWMaskHelper* helper, const GrClipStack::Element& e, bool clearMask) {
    // If the first element to draw is an intersect, we clear to 0 and will draw it directly with
    // coverage 1 (subsequent intersect elements will be inverse-filled and draw 0 outside).
    // If the first element to draw is a difference, we clear to 1, and in all cases we draw the
    // difference element directly with coverage 0.
    if (clearMask) {
        helper->clear(e.fOp == SkClipOp::kIntersect ? 0x00 : 0xFF);
    }

    uint8_t alpha;
    bool invert;
    if (e.fOp == SkClipOp::kIntersect) {
        // Intersect modifies pixels outside of its geometry. If this isn't the first op, we
        // draw the inverse-filled shape with 0 coverage to erase everything outside the element
        // But if we are the first element, we can draw directly with coverage 1 since we
        // cleared to 0.
        if (clearMask) {
            alpha = 0xFF;
            invert = false;
        } else {
            alpha = 0x00;
            invert = true;
        }
    } else {
        // For difference ops, can always just subtract the shape directly by drawing 0 coverage
        SkASSERT(e.fOp == SkClipOp::kDifference);
        alpha = 0x00;
        invert = false;
    }

    // Draw the shape; based on how we've initialized the buffer and chosen alpha+invert,
    // every element is drawn with the kReplace_Op
    if (invert) {
        // Must invert the path
        SkASSERT(!e.fShape.inverted());
        // TODO: this is an extra copy effectively, just so we can toggle inversion; would be
        // better perhaps to just call a drawPath() since we know it'll use path rendering w/
        // the inverse fill type.
        GrShape inverted(e.fShape);
        inverted.setInverted(true);
        helper->drawShape(inverted, e.fLocalToDevice, SkRegion::kReplace_Op, e.fAA, alpha);
    } else {
        helper->drawShape(e.fShape, e.fLocalToDevice, SkRegion::kReplace_Op, e.fAA, alpha);
    }
}
321
// Renders 'count' clip elements into an A8 software mask covering 'bounds' and returns it as a
// texture view. When a direct context with a task group is available, rasterization happens on a
// worker thread via a deferred-upload proxy; otherwise the mask is drawn synchronously.
// Returns an empty view on allocation failure in the synchronous path.
static GrSurfaceProxyView render_sw_mask(GrRecordingContext* context, const SkIRect& bounds,
                                         const GrClipStack::Element** elements, int count) {
    SkASSERT(count > 0);

    // Threaded rendering is only possible with a direct context (DDL recording has no task group).
    SkTaskGroup* taskGroup = nullptr;
    if (auto direct = context->asDirectContext()) {
        taskGroup = direct->priv().getTaskGroup();
    }

    if (taskGroup) {
        const GrCaps* caps = context->priv().caps();
        GrProxyProvider* proxyProvider = context->priv().proxyProvider();

        // Create our texture proxy
        GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                               GrRenderable::kNo);

        GrSwizzle swizzle = context->priv().caps()->getReadSwizzle(format, GrColorType::kAlpha_8);
        // NOTE(review): 'proxy' is not checked for null before use below — presumably
        // createProxy cannot fail for a default A8 format here; verify against GrProxyProvider.
        auto proxy = proxyProvider->createProxy(format, bounds.size(), GrRenderable::kNo, 1,
                                                GrMipMapped::kNo, SkBackingFit::kApprox,
                                                SkBudgeted::kYes, GrProtected::kNo);

        // Since this will be rendered on another thread, make a copy of the elements in case
        // the clip stack is modified on the main thread
        using Uploader = GrTDeferredProxyUploader<SkTArray<GrClipStack::Element>>;
        std::unique_ptr<Uploader> uploader = std::make_unique<Uploader>(count);
        for (int i = 0; i < count; ++i) {
            uploader->data().push_back(*(elements[i]));
        }

        // The lambda captures a raw pointer; the uploader's lifetime is managed by the proxy,
        // which outlives the task (signalAndFreeData releases the pixel copy when done).
        Uploader* uploaderRaw = uploader.get();
        auto drawAndUploadMask = [uploaderRaw, bounds] {
            TRACE_EVENT0("skia.gpu", "Threaded SW Clip Mask Render");
            GrSWMaskHelper helper(uploaderRaw->getPixels());
            if (helper.init(bounds)) {
                for (int i = 0; i < uploaderRaw->data().count(); ++i) {
                    draw_to_sw_mask(&helper, uploaderRaw->data()[i], i == 0);
                }
            } else {
                SkDEBUGFAIL("Unable to allocate SW clip mask.");
            }
            uploaderRaw->signalAndFreeData();
        };

        taskGroup->add(std::move(drawAndUploadMask));
        proxy->texPriv().setDeferredUploader(std::move(uploader));

        return {std::move(proxy), kMaskOrigin, swizzle};
    } else {
        // Synchronous fallback: rasterize on this thread and upload immediately.
        GrSWMaskHelper helper;
        if (!helper.init(bounds)) {
            return {};
        }

        for (int i = 0; i < count; ++i) {
            draw_to_sw_mask(&helper,*(elements[i]), i == 0);
        }

        return helper.toTextureView(context, SkBackingFit::kApprox);
    }
}
383
// Renders the clip elements into the stencil buffer of 'rtc' and records the stencil clip on
// 'out'. Mirrors the element-order policy of draw_to_sw_mask: the first element decides the
// initial clear state, intersects after the first use kIntersect_Op, differences always subtract.
static void render_stencil_mask(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
                                uint32_t genID, const SkIRect& bounds,
                                const GrClipStack::Element** elements, int count,
                                GrAppliedClip* out) {
    GrStencilMaskHelper helper(context, rtc);
    // init() returns false when the stencil already holds a mask for this genID; the existing
    // mask is reused and only the stencil clip needs to be re-registered below.
    if (helper.init(bounds, genID, out->windowRectsState().windows(), 0)) {
        // This follows the same logic as in draw_sw_mask
        bool startInside = elements[0]->fOp == SkClipOp::kDifference;
        helper.clear(startInside);
        for (int i = 0; i < count; ++i) {
            const GrClipStack::Element& e = *(elements[i]);
            SkRegion::Op op;
            if (e.fOp == SkClipOp::kIntersect) {
                op = (i == 0) ? SkRegion::kReplace_Op : SkRegion::kIntersect_Op;
            } else {
                op = SkRegion::kDifference_Op;
            }
            helper.drawShape(e.fShape, e.fLocalToDevice, op, e.fAA);
        }
        helper.finish();
    }
    out->hardClip().addStencilClip(genID);
}
407
408} // anonymous namespace
409
// Lightweight adapter representing the bounds of a draw so it can participate in
// get_clip_geometry() alongside Elements and SaveRecords. A draw always behaves like an
// intersect op (it only has coverage inside its bounds) and never contains other clip state.
class GrClipStack::Draw {
public:
    Draw(const SkRect& drawBounds, GrAA aa)
            : fBounds(GrClip::GetPixelIBounds(drawBounds, aa, BoundsType::kExterior))
            , fAA(aa) {
        // Be slightly more forgiving on whether or not a draw is inside a clip element.
        fOriginalBounds = drawBounds.makeInset(GrClip::kBoundsTolerance, GrClip::kBoundsTolerance);
        if (fOriginalBounds.isEmpty()) {
            // Inset collapsed the rect (draw smaller than the tolerance); keep the true bounds
            fOriginalBounds = drawBounds;
        }
    }

    // Common clip type interface
    SkClipOp op() const { return SkClipOp::kIntersect; }
    const SkIRect& outerBounds() const { return fBounds; }

    // Draw does not have inner bounds so cannot contain anything.
    bool contains(const RawElement& e) const { return false; }
    bool contains(const SaveRecord& s) const { return false; }

    // Clamps the draw's pixel bounds to the device; returns false if nothing remains visible.
    bool applyDeviceBounds(const SkIRect& deviceBounds) {
        return fBounds.intersect(deviceBounds);
    }

    const SkRect& bounds() const { return fOriginalBounds; }
    GrAA aa() const { return fAA; }

private:
    SkRect fOriginalBounds;  // Tolerance-inset float bounds used for containment queries
    SkIRect fBounds;         // Conservative pixel bounds (exterior rounding)
    GrAA fAA;
};
442
443///////////////////////////////////////////////////////////////////////////////
444// GrClipStack::Element
445
// Constructs a raw element from a clip shape; bounds start empty and are filled in by simplify().
// A non-invertible local-to-device matrix degenerates the element to empty.
GrClipStack::RawElement::RawElement(const SkMatrix& localToDevice, const GrShape& shape,
                                    GrAA aa, SkClipOp op)
        : Element{shape, localToDevice, op, aa}
        , fInnerBounds(SkIRect::MakeEmpty())
        , fOuterBounds(SkIRect::MakeEmpty())
        , fInvalidatedByIndex(-1) {
    if (!localToDevice.invert(&fDeviceToLocal)) {
        // If the transform can't be inverted, it means that two dimensions are collapsed to 0 or
        // 1 dimension, making the device-space geometry effectively empty.
        fShape.reset();
    }
}
458
// Marks this element inactive, remembering which save record deactivated it so that a later
// restore() can re-validate it (see restoreValid()).
void GrClipStack::RawElement::markInvalid(const SaveRecord& current) {
    SkASSERT(!this->isInvalid());
    fInvalidatedByIndex = current.firstActiveElementIndex();
}
463
// Re-activates the element if the save record that invalidated it has been popped (i.e. the
// current record's first active index precedes the recorded invalidation index).
void GrClipStack::RawElement::restoreValid(const SaveRecord& current) {
    if (current.firstActiveElementIndex() < fInvalidatedByIndex) {
        fInvalidatedByIndex = -1;
    }
}
469
// Returns true if this element's full-coverage region contains the entire draw. Fast-path via
// the element's inner pixel bounds, falling back to the geometric convexity test.
bool GrClipStack::RawElement::contains(const Draw& d) const {
    if (fInnerBounds.contains(d.outerBounds())) {
        return true;
    } else {
        // If the draw is non-AA, use the already computed outer bounds so we don't need to use
        // device-space outsetting inside shape_contains_rect.
        SkRect queryBounds = d.aa() == GrAA::kYes ? d.bounds() : SkRect::Make(d.outerBounds());
        return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
                                   queryBounds, SkMatrix::I(), /* mixed-aa */ false);
    }
}
481
// Returns true if this element's full-coverage region contains the save record's outer bounds.
bool GrClipStack::RawElement::contains(const SaveRecord& s) const {
    if (fInnerBounds.contains(s.outerBounds())) {
        return true;
    } else {
        // This is very similar to contains(Draw) but we just have outerBounds to work with.
        SkRect queryBounds = SkRect::Make(s.outerBounds());
        return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
                                   queryBounds, SkMatrix::I(), /* mixed-aa */ false);
    }
}
492
// Returns true if this element's full-coverage region contains another element in its entirety.
bool GrClipStack::RawElement::contains(const RawElement& e) const {
    // This is similar to how RawElement checks containment for a Draw, except that both the tester
    // and testee have a transform that needs to be considered.
    if (fInnerBounds.contains(e.fOuterBounds)) {
        return true;
    }

    bool mixedAA = fAA != e.fAA;
    if (!mixedAA && fLocalToDevice == e.fLocalToDevice) {
        // Test the shapes directly against each other, with a special check for a rrect+rrect
        // containment (a intersect b == a implies b contains a) and paths (same gen ID, or same
        // path for small paths means they contain each other).
        static constexpr int kMaxPathComparePoints = 16;
        if (fShape.isRRect() && e.fShape.isRRect()) {
            return SkRRectPriv::ConservativeIntersect(fShape.rrect(), e.fShape.rrect())
                    == e.fShape.rrect();
        } else if (fShape.isPath() && e.fShape.isPath()) {
            // Deep path comparison is only worth doing for small point counts
            return fShape.path().getGenerationID() == e.fShape.path().getGenerationID() ||
                   (fShape.path().getPoints(nullptr, 0) <= kMaxPathComparePoints &&
                    fShape.path() == e.fShape.path());
        } // else fall through to shape_contains_rect
    }

    return shape_contains_rect(fShape, fLocalToDevice, fDeviceToLocal,
                               e.fShape.bounds(), e.fLocalToDevice, mixedAA);

}
520
// Canonicalizes the element after construction: resolves inverted fills into the opposite op,
// simplifies the shape, clamps to the device, possibly upgrades AA, and computes the inner/outer
// pixel bounds. An element that ends up with no on-screen coverage has its shape reset to empty.
void GrClipStack::RawElement::simplify(const SkIRect& deviceBounds, bool forceAA) {
    // Make sure the shape is not inverted. An inverted shape is equivalent to a non-inverted shape
    // with the clip op toggled.
    if (fShape.inverted()) {
        fOp = fOp == SkClipOp::kIntersect ? SkClipOp::kDifference : SkClipOp::kIntersect;
        fShape.setInverted(false);
    }

    // Then simplify the base shape, if it becomes empty, no need to update the bounds
    fShape.simplify();
    SkASSERT(!fShape.inverted());
    if (fShape.isEmpty()) {
        return;
    }

    // Lines and points should have been turned into empty since we assume everything is filled
    SkASSERT(!fShape.isPoint() && !fShape.isLine());
    // Validity check, we have no public API to create an arc at the moment
    SkASSERT(!fShape.isArc());

    SkRect outer = fLocalToDevice.mapRect(fShape.bounds());
    if (!outer.intersect(SkRect::Make(deviceBounds))) {
        // A non-empty shape is offscreen, so treat it as empty
        fShape.reset();
        return;
    }

    // Except for axis-aligned clip rects, upgrade to AA when forced. We skip axis-aligned clip
    // rects because a non-AA axis aligned rect can always be set as just a scissor test or window
    // rect, avoiding an expensive stencil mask generation.
    if (forceAA && !(fShape.isRect() && fLocalToDevice.preservesAxisAlignment())) {
        fAA = GrAA::kYes;
    }

    // Except for non-AA axis-aligned rects, the outer bounds is the rounded-out device-space
    // mapped bounds of the shape.
    fOuterBounds = GrClip::GetPixelIBounds(outer, fAA, BoundsType::kExterior);

    if (fLocalToDevice.preservesAxisAlignment()) {
        if (fShape.isRect()) {
            // The actual geometry can be updated to the device-intersected bounds and we can
            // know the inner bounds
            fShape.rect() = outer;
            fLocalToDevice.setIdentity();
            fDeviceToLocal.setIdentity();

            if (fAA == GrAA::kNo && outer.width() >= 1.f && outer.height() >= 1.f) {
                // NOTE: Legacy behavior to avoid performance regressions. For non-aa axis-aligned
                // clip rects we always just round so that they can be scissor-only (avoiding the
                // uncertainty in how a GPU might actually round an edge on fractional coords).
                fOuterBounds = outer.round();
                fInnerBounds = fOuterBounds;
            } else {
                fInnerBounds = GrClip::GetPixelIBounds(outer, fAA, BoundsType::kInterior);
                SkASSERT(fOuterBounds.contains(fInnerBounds) || fInnerBounds.isEmpty());
            }
        } else if (fShape.isRRect()) {
            // Can't transform in place and must still check transform result since some very
            // ill-formed scale+translate matrices can cause invalid rrect radii.
            SkRRect src;
            if (fShape.rrect().transform(fLocalToDevice, &src)) {
                fShape.rrect() = src;
                fLocalToDevice.setIdentity();
                fDeviceToLocal.setIdentity();

                // The rrect's inscribed rect gives a conservative full-coverage interior
                SkRect inner = SkRRectPriv::InnerBounds(fShape.rrect());
                fInnerBounds = GrClip::GetPixelIBounds(inner, fAA, BoundsType::kInterior);
                if (!fInnerBounds.intersect(deviceBounds)) {
                    fInnerBounds = SkIRect::MakeEmpty();
                }
            }
        }
    }

    if (fOuterBounds.isEmpty()) {
        // This can happen if we have non-AA shapes smaller than a pixel that do not cover a pixel
        // center. We could round out, but rasterization would still result in an empty clip.
        fShape.reset();
    }

    // Post-conditions on inner and outer bounds
    SkASSERT(fShape.isEmpty() || (!fOuterBounds.isEmpty() && deviceBounds.contains(fOuterBounds)));
    SkASSERT(fShape.isEmpty() || fInnerBounds.isEmpty() || fOuterBounds.contains(fInnerBounds));
}
605
// Attempts to merge 'other' into this element so only one element needs to remain on the stack.
// Returns true when this element now represents the combination (the caller should invalidate
// 'other'); if the merged region is provably empty this element is also marked invalid.
bool GrClipStack::RawElement::combine(const RawElement& other, const SaveRecord& current) {
    // To reduce the number of possibilities, only consider intersect+intersect. Difference and
    // mixed op cases could be analyzed to simplify one of the shapes, but that is a rare
    // occurrence and the math is much more complicated.
    if (other.fOp != SkClipOp::kIntersect || fOp != SkClipOp::kIntersect) {
        return false;
    }

    // At the moment, only rect+rect or rrect+rrect are supported (although rect+rrect is
    // treated as a degenerate case of rrect+rrect).
    bool shapeUpdated = false;
    if (fShape.isRect() && other.fShape.isRect()) {
        bool aaMatch = fAA == other.fAA;
        if (fLocalToDevice.isIdentity() && other.fLocalToDevice.isIdentity() && !aaMatch) {
            // Mismatched AA can still combine when one rect's edges are pixel aligned, since
            // AA and non-AA rasterization agree on pixel-aligned edges.
            if (GrClip::IsPixelAligned(fShape.rect())) {
                // Our AA type doesn't really matter, take other's since its edges may not be
                // pixel aligned, so after intersection clip behavior should respect its aa type.
                fAA = other.fAA;
            } else if (!GrClip::IsPixelAligned(other.fShape.rect())) {
                // Neither shape is pixel aligned and AA types don't match so can't combine
                return false;
            }
            // Either we've updated this->fAA to actually match, or other->fAA doesn't matter so
            // this can be set to true. We just can't modify other to set it's aa to this->fAA.
            // But since 'this' becomes the combo of the two, other will be deleted so that's fine.
            aaMatch = true;
        }

        if (aaMatch && fLocalToDevice == other.fLocalToDevice) {
            if (!fShape.rect().intersect(other.fShape.rect())) {
                // By floating point, it turns out the combination should be empty
                this->fShape.reset();
                this->markInvalid(current);
                return true;
            }
            shapeUpdated = true;
        }
    } else if ((fShape.isRect() || fShape.isRRect()) &&
               (other.fShape.isRect() || other.fShape.isRRect())) {
        // No such pixel-aligned disregard for AA for round rects
        if (fAA == other.fAA && fLocalToDevice == other.fLocalToDevice) {
            // Treat rrect+rect intersections as rrect+rrect
            SkRRect a = fShape.isRect() ? SkRRect::MakeRect(fShape.rect()) : fShape.rrect();
            SkRRect b = other.fShape.isRect() ? SkRRect::MakeRect(other.fShape.rect())
                                              : other.fShape.rrect();

            SkRRect joined = SkRRectPriv::ConservativeIntersect(a, b);
            if (!joined.isEmpty()) {
                // Can reduce to a single element
                if (joined.isRect()) {
                    // And with a simplified type
                    fShape.setRect(joined.rect());
                } else {
                    fShape.setRRect(joined);
                }
                shapeUpdated = true;
            } else if (!a.getBounds().intersects(b.getBounds())) {
                // Like the rect+rect combination, the intersection is actually empty
                fShape.reset();
                this->markInvalid(current);
                return true;
            }
        }
    }

    if (shapeUpdated) {
        // This logic works under the assumption that both combined elements were intersect, so we
        // don't do the full bounds computations like in simplify().
        SkASSERT(fOp == SkClipOp::kIntersect && other.fOp == SkClipOp::kIntersect);
        SkAssertResult(fOuterBounds.intersect(other.fOuterBounds));
        if (!fInnerBounds.intersect(other.fInnerBounds)) {
            fInnerBounds = SkIRect::MakeEmpty();
        }
        return true;
    } else {
        return false;
    }
}
684
// Re-evaluates this element against a newly added element, invalidating whichever element (or
// both) has become redundant, or attempting to merge the two into 'added'.
void GrClipStack::RawElement::updateForElement(RawElement* added, const SaveRecord& current) {
    if (this->isInvalid()) {
        // Already doesn't do anything, so skip this element
        return;
    }

    // 'A' refers to this element, 'B' refers to 'added'.
    switch (get_clip_geometry(*this, *added)) {
        case ClipGeometry::kEmpty:
            // Mark both elements as invalid to signal that the clip is fully empty
            this->markInvalid(current);
            added->markInvalid(current);
            break;

        case ClipGeometry::kAOnly:
            // This element already clips more than 'added', so mark 'added' is invalid to skip it
            added->markInvalid(current);
            break;

        case ClipGeometry::kBOnly:
            // 'added' clips more than this element, so mark this as invalid
            this->markInvalid(current);
            break;

        case ClipGeometry::kBoth:
            // Else the bounds checks think we need to keep both, but depending on the combination
            // of the ops and shape kinds, we may be able to do better.
            if (added->combine(*this, current)) {
                // 'added' now fully represents the combination of the two elements
                this->markInvalid(current);
            }
            break;
    }
}
719
// Classifies this element into the coarse clip states used by GrClipStack: device-aligned
// intersect rects/rrects get dedicated fast paths, everything else is kComplex.
GrClipStack::ClipState GrClipStack::RawElement::clipType() const {
    // Map from the internal shape kind to the clip state enum
    switch (fShape.type()) {
        case GrShape::Type::kEmpty:
            return ClipState::kEmpty;

        case GrShape::Type::kRect:
            return fOp == SkClipOp::kIntersect && fLocalToDevice.isIdentity()
                    ? ClipState::kDeviceRect : ClipState::kComplex;

        case GrShape::Type::kRRect:
            return fOp == SkClipOp::kIntersect && fLocalToDevice.isIdentity()
                    ? ClipState::kDeviceRRect : ClipState::kComplex;

        case GrShape::Type::kArc:
        case GrShape::Type::kLine:
        case GrShape::Type::kPoint:
            // These types should never become RawElements
            SkASSERT(false);
            [[fallthrough]];

        case GrShape::Type::kPath:
            return ClipState::kComplex;
    }
    SkUNREACHABLE;
}
746
747///////////////////////////////////////////////////////////////////////////////
748// GrClipStack::Mask
749
// Records the unique key for a cached clip mask, derived from the save record's generation ID and
// the draw bounds the mask was rendered for.
GrClipStack::Mask::Mask(const SaveRecord& current, const SkIRect& drawBounds)
        : fBounds(drawBounds)
        , fGenID(current.genID()) {
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();

    // The gen ID should not be invalid, empty, or wide open, since those do not require masks
    SkASSERT(fGenID != kInvalidGenID && fGenID != kEmptyGenID && fGenID != kWideOpenGenID);

    GrUniqueKey::Builder builder(&fKey, kDomain, 3, "clip_mask");
    builder[0] = fGenID;
    // SkToS16 because image filters outset layers to a size indicated by the filter, which can
    // sometimes result in negative coordinates from device space.
    // NOTE(review): packing assumes all coordinates fit in 16 bits; SkToS16 asserts that in
    // debug builds rather than handling overflow — confirm callers guarantee this range.
    builder[1] = SkToS16(drawBounds.fLeft) | (SkToS16(drawBounds.fRight) << 16);
    builder[2] = SkToS16(drawBounds.fTop) | (SkToS16(drawBounds.fBottom) << 16);
    SkASSERT(fKey.isValid());

    SkDEBUGCODE(fOwner = &current;)
}
768
769bool GrClipStack::Mask::appliesToDraw(const SaveRecord& current, const SkIRect& drawBounds) const {
770 // For the same save record, a larger mask will have the same or more elements
771 // baked into it, so it can be reused to clip the smaller draw.
772 SkASSERT(fGenID != current.genID() || &current == fOwner);
773 return fGenID == current.genID() && fBounds.contains(drawBounds);
774}
775
776void GrClipStack::Mask::invalidate(GrProxyProvider* proxyProvider) {
777 SkASSERT(proxyProvider);
778 SkASSERT(fKey.isValid()); // Should only be invalidated once
779 proxyProvider->processInvalidUniqueKey(
780 fKey, nullptr, GrProxyProvider::InvalidateGPUResource::kYes);
781 fKey.reset();
782}
783
784///////////////////////////////////////////////////////////////////////////////
785// GrClipStack::SaveRecord
786
// Constructs the root save record: a wide-open intersect clip covering the full device bounds,
// with no shader, no elements, and no generation ID assigned yet.
GrClipStack::SaveRecord::SaveRecord(const SkIRect& deviceBounds)
        : fInnerBounds(deviceBounds)
        , fOuterBounds(deviceBounds)
        , fShader(nullptr)
        , fStartingMaskIndex(0)
        , fStartingElementIndex(0)
        , fOldestValidIndex(0)
        , fDeferredSaveCount(0)
        , fStackOp(SkClipOp::kIntersect)
        , fState(ClipState::kWideOpen)
        , fGenID(kInvalidGenID) {}
798
// Constructs a new save record that inherits the aggregate clip state of 'prior' while taking
// ownership of any elements and masks recorded at or after the given starting indices.
GrClipStack::SaveRecord::SaveRecord(const SaveRecord& prior,
                                    int startingMaskIndex,
                                    int startingElementIndex)
        : fInnerBounds(prior.fInnerBounds)
        , fOuterBounds(prior.fOuterBounds)
        , fShader(prior.fShader)
        , fStartingMaskIndex(startingMaskIndex)
        , fStartingElementIndex(startingElementIndex)
        , fOldestValidIndex(prior.fOldestValidIndex)
        , fDeferredSaveCount(0)
        , fStackOp(prior.fStackOp)
        , fState(prior.fState)
        , fGenID(kInvalidGenID) {
    // If the prior record never needed a mask, this one will insert into the same index
    // (that's okay since we'll remove it when this record is popped off the stack).
    SkASSERT(startingMaskIndex >= prior.fStartingMaskIndex);
    // The same goes for elements (the prior could have been wide open).
    SkASSERT(startingElementIndex >= prior.fStartingElementIndex);
}
818
819uint32_t GrClipStack::SaveRecord::genID() const {
820 if (fState == ClipState::kEmpty) {
821 return kEmptyGenID;
822 } else if (fState == ClipState::kWideOpen) {
823 return kWideOpenGenID;
824 } else {
825 // The gen ID shouldn't be empty or wide open, since they are reserved for the above
826 // if-cases. It may be kInvalid if the record hasn't had any elements added to it yet.
827 SkASSERT(fGenID != kEmptyGenID && fGenID != kWideOpenGenID);
828 return fGenID;
829 }
830}
831
832GrClipStack::ClipState GrClipStack::SaveRecord::state() const {
833 if (fShader && fState != ClipState::kEmpty) {
834 return ClipState::kComplex;
835 } else {
836 return fState;
837 }
838}
839
840bool GrClipStack::SaveRecord::contains(const GrClipStack::Draw& draw) const {
841 return fInnerBounds.contains(draw.outerBounds());
842}
843
844bool GrClipStack::SaveRecord::contains(const GrClipStack::RawElement& element) const {
845 return fInnerBounds.contains(element.outerBounds());
846}
847
848void GrClipStack::SaveRecord::removeElements(RawElement::Stack* elements) {
849 while (elements->count() > fStartingElementIndex) {
850 elements->pop_back();
851 }
852}
853
// Re-validates elements owned by this record after a younger save record has been popped.
void GrClipStack::SaveRecord::restoreElements(RawElement::Stack* elements) {
    // Presumably this SaveRecord is the new top of the stack, and so it owns the elements
    // from its starting index up to the current top of 'elements'. Elements that belonged to
    // the popped save record have been destroyed already, so any element still present that
    // was only invalidated by the removed record can be marked valid again.
    int i = elements->count() - 1;
    for (RawElement& e : elements->ritems()) {
        if (i < fOldestValidIndex) {
            // Elements older than the oldest-valid index remain invalid for this record.
            break;
        }
        e.restoreValid(*this);
        --i;
    }
}
868
869void GrClipStack::SaveRecord::invalidateMasks(GrProxyProvider* proxyProvider,
870 Mask::Stack* masks) {
871 // Must explicitly invalidate the key before removing the mask object from the stack
872 while (masks->count() > fStartingMaskIndex) {
873 SkASSERT(masks->back().owner() == this && proxyProvider);
874 masks->back().invalidate(proxyProvider);
875 masks->pop_back();
876 }
877 SkASSERT(masks->empty() || masks->back().genID() != fGenID);
878}
879
880void GrClipStack::SaveRecord::reset(const SkIRect& bounds) {
881 SkASSERT(this->canBeUpdated());
882 fOldestValidIndex = fStartingElementIndex;
883 fOuterBounds = bounds;
884 fInnerBounds = bounds;
885 fStackOp = SkClipOp::kIntersect;
886 fState = ClipState::kWideOpen;
887 fShader = nullptr;
888}
889
890void GrClipStack::SaveRecord::addShader(sk_sp<SkShader> shader) {
891 SkASSERT(shader);
892 SkASSERT(this->canBeUpdated());
893 if (!fShader) {
894 fShader = std::move(shader);
895 } else {
896 // The total coverage is computed by multiplying the coverage from each element (shape or
897 // shader), but since multiplication is associative, we can use kSrcIn blending to make
898 // a new shader that represents 'shader' * 'fShader'
899 fShader = SkShaders::Blend(SkBlendMode::kSrcIn, std::move(shader), fShader);
900 }
901}
902
// Merges a new clip element into this save record, updating the record's aggregate state and
// inner/outer bounds. Returns true if the stack's visible clip changed (so cached masks must be
// invalidated), false if the element was redundant and discarded.
bool GrClipStack::SaveRecord::addElement(RawElement&& toAdd, RawElement::Stack* elements) {
    // Validity check the element's state first; if the shape class isn't empty, the outer bounds
    // shouldn't be empty; if the inner bounds are not empty, they must be contained in outer.
    SkASSERT((toAdd.shape().isEmpty() || !toAdd.outerBounds().isEmpty()) &&
             (toAdd.innerBounds().isEmpty() || toAdd.outerBounds().contains(toAdd.innerBounds())));
    // And we shouldn't be adding an element if we have a deferred save
    SkASSERT(this->canBeUpdated());

    if (fState == ClipState::kEmpty) {
        // The clip is already empty, and we only shrink, so there's no need to record this element.
        return false;
    } else if (toAdd.shape().isEmpty()) {
        // An empty difference op should have been detected earlier, since it's a no-op
        SkASSERT(toAdd.op() == SkClipOp::kIntersect);
        fState = ClipState::kEmpty;
        return true;
    }

    // In this invocation, 'A' refers to the existing stack's bounds and 'B' refers to the new
    // element.
    switch (get_clip_geometry(*this, toAdd)) {
        case ClipGeometry::kEmpty:
            // The combination results in an empty clip
            fState = ClipState::kEmpty;
            return true;

        case ClipGeometry::kAOnly:
            // The combination would not be any different than the existing clip
            return false;

        case ClipGeometry::kBOnly:
            // The combination would invalidate the entire existing stack and can be replaced with
            // just the new element.
            this->replaceWithElement(std::move(toAdd), elements);
            return true;

        case ClipGeometry::kBoth:
            // The new element combines in a complex manner, so update the stack's bounds based on
            // the combination of its and the new element's ops (handled below)
            break;
    }

    if (fState == ClipState::kWideOpen) {
        // When the stack was wide open and the clip effect was kBoth, the "complex" manner is
        // simply to keep the element and update the stack bounds to be the element's intersected
        // with the device.
        this->replaceWithElement(std::move(toAdd), elements);
        return true;
    }

    // Some form of actual clip element(s) to combine with. The four op pairings below each have
    // their own bounds math; the outer bounds always bound everything that may be drawn and the
    // inner bounds always bound a region known to be fully inside the clip.
    if (fStackOp == SkClipOp::kIntersect) {
        if (toAdd.op() == SkClipOp::kIntersect) {
            // Intersect (stack) + Intersect (toAdd)
            //  - Bounds updates is simply the paired intersections of outer and inner.
            SkAssertResult(fOuterBounds.intersect(toAdd.outerBounds()));
            if (!fInnerBounds.intersect(toAdd.innerBounds())) {
                // NOTE: this does the right thing if either rect is empty, since we set the
                // inner bounds to empty here
                fInnerBounds = SkIRect::MakeEmpty();
            }
        } else {
            // Intersect (stack) + Difference (toAdd)
            //  - Shrink the stack's outer bounds if the difference op's inner bounds completely
            //    cuts off an edge.
            //  - Shrink the stack's inner bounds to completely exclude the op's outer bounds.
            fOuterBounds = subtract(fOuterBounds, toAdd.innerBounds(), /* exact */ true);
            fInnerBounds = subtract(fInnerBounds, toAdd.outerBounds(), /* exact */ false);
        }
    } else {
        if (toAdd.op() == SkClipOp::kIntersect) {
            // Difference (stack) + Intersect (toAdd)
            //  - Bounds updates are just the mirror of Intersect(stack) + Difference(toAdd)
            SkIRect oldOuter = fOuterBounds;
            fOuterBounds = subtract(toAdd.outerBounds(), fInnerBounds, /* exact */ true);
            fInnerBounds = subtract(toAdd.innerBounds(), oldOuter,     /* exact */ false);
        } else {
            // Difference (stack) + Difference (toAdd)
            //  - The updated outer bounds is the union of outer bounds and the inner becomes the
            //    largest of the two possible inner bounds
            fOuterBounds.join(toAdd.outerBounds());
            if (toAdd.innerBounds().width() * toAdd.innerBounds().height() >
                fInnerBounds.width() * fInnerBounds.height()) {
                fInnerBounds = toAdd.innerBounds();
            }
        }
    }

    // If we get here, we're keeping the new element and the stack's bounds have been updated.
    // We ought to have caught the cases where the stack bounds resemble an empty or wide open
    // clip, so assert that's the case.
    SkASSERT(!fOuterBounds.isEmpty() &&
             (fInnerBounds.isEmpty() || fOuterBounds.contains(fInnerBounds)));

    return this->appendElement(std::move(toAdd), elements);
}
999
// Inserts 'toAdd' into the element stack, first letting every still-valid older element interact
// with it (each pair may invalidate one or the other, or merge). Reuses an invalidated active
// slot when possible and trims invalidated active elements from the top of the stack. Returns
// true if the clip changed.
bool GrClipStack::SaveRecord::appendElement(RawElement&& toAdd, RawElement::Stack* elements) {
    // Update past elements to account for the new element
    int i = elements->count() - 1;

    // After the loop, elements between [max(youngestValid, startingIndex)+1, count-1] can be
    // removed from the stack (these are the active elements that have been invalidated by the
    // newest element; since it's the active part of the stack, no restore() can bring them back).
    int youngestValid = fStartingElementIndex - 1;
    // After the loop, elements between [0, oldestValid-1] are all invalid. The value of oldestValid
    // becomes the save record's new fLastValidIndex value.
    int oldestValid = elements->count();
    // After the loop, this is the earliest active element that was invalidated. It may be
    // older in the stack than earliestValid, so cannot be popped off, but can be used to store
    // the new element instead of allocating more.
    RawElement* oldestActiveInvalid = nullptr;
    int oldestActiveInvalidIndex = elements->count();

    for (RawElement& existing : elements->ritems()) {
        if (i < fOldestValidIndex) {
            break;
        }
        // We don't need to pass the actual index that toAdd will be saved to; just the minimum
        // index of this save record, since that will result in the same restoration behavior later.
        existing.updateForElement(&toAdd, *this);

        if (toAdd.isInvalid()) {
            if (existing.isInvalid()) {
                // Both new and old invalid implies the entire clip becomes empty
                fState = ClipState::kEmpty;
                return true;
            } else {
                // The new element doesn't change the clip beyond what the old element already does
                return false;
            }
        } else if (existing.isInvalid()) {
            // The new element cancels out the old element. The new element may have been modified
            // to account for the old element's geometry.
            if (i >= fStartingElementIndex) {
                // Still active, so the invalidated index could be used to store the new element
                oldestActiveInvalid = &existing;
                oldestActiveInvalidIndex = i;
            }
        } else {
            // Keep both new and old elements
            oldestValid = i;
            if (i > youngestValid) {
                youngestValid = i;
            }
        }

        --i;
    }

    // Post-iteration validity check
    SkASSERT(oldestValid == elements->count() ||
             (oldestValid >= fOldestValidIndex && oldestValid < elements->count()));
    SkASSERT(youngestValid == fStartingElementIndex - 1 ||
             (youngestValid >= fStartingElementIndex && youngestValid < elements->count()));
    SkASSERT((oldestActiveInvalid && oldestActiveInvalidIndex >= fStartingElementIndex &&
              oldestActiveInvalidIndex < elements->count()) || !oldestActiveInvalid);

    // Update final state
    SkASSERT(oldestValid >= fOldestValidIndex);
    fOldestValidIndex = std::min(oldestValid, oldestActiveInvalidIndex);
    fState = oldestValid == elements->count() ? toAdd.clipType() : ClipState::kComplex;
    if (fStackOp == SkClipOp::kDifference && toAdd.op() == SkClipOp::kIntersect) {
        // The stack remains in difference mode only as long as all elements are difference
        fStackOp = SkClipOp::kIntersect;
    }

    int targetCount = youngestValid + 1;
    if (!oldestActiveInvalid || oldestActiveInvalidIndex >= targetCount) {
        // toAdd will be stored right after youngestValid
        targetCount++;
        oldestActiveInvalid = nullptr;
    }
    while (elements->count() > targetCount) {
        SkASSERT(oldestActiveInvalid != &elements->back()); // shouldn't delete what we'll reuse
        elements->pop_back();
    }
    if (oldestActiveInvalid) {
        // Reuse the invalidated-but-still-active slot instead of growing the stack.
        *oldestActiveInvalid = std::move(toAdd);
    } else if (elements->count() < targetCount) {
        elements->push_back(std::move(toAdd));
    } else {
        elements->back() = std::move(toAdd);
    }

    // Changing this will prompt GrClipStack to invalidate any masks associated with this record.
    fGenID = next_gen_id();
    return true;
}
1092
// Discards all of this record's active elements and makes 'toAdd' the sole element, copying its
// bounds/op/type into the record's aggregate state.
void GrClipStack::SaveRecord::replaceWithElement(RawElement&& toAdd, RawElement::Stack* elements) {
    // The aggregate state of the save record mirrors the element
    fInnerBounds = toAdd.innerBounds();
    fOuterBounds = toAdd.outerBounds();
    fStackOp = toAdd.op();
    fState = toAdd.clipType();

    // All prior active element can be removed from the stack: [startingIndex, count - 1]
    int targetCount = fStartingElementIndex + 1;
    while (elements->count() > targetCount) {
        elements->pop_back();
    }
    if (elements->count() < targetCount) {
        elements->push_back(std::move(toAdd));
    } else {
        // Reuse the slot at the starting index rather than popping and re-pushing.
        elements->back() = std::move(toAdd);
    }

    SkASSERT(elements->count() == fStartingElementIndex + 1);

    // This invalidates all older elements that are owned by save records lower in the clip stack.
    fOldestValidIndex = fStartingElementIndex;
    fGenID = next_gen_id();
}
1117
1118///////////////////////////////////////////////////////////////////////////////
1119// GrClipStack
1120
// Empirically-tuned sizing for the element, save, and mask stacks:
//
// NOTE: Based on draw calls in all GMs, SKPs, and SVGs as of 08/20, 98% use a clip stack with
// one Element and up to two SaveRecords, thus the inline size for RawElement::Stack and
// SaveRecord::Stack (this conveniently keeps the size of GrClipStack manageable). The max
// encountered element stack depth was 5 and the max save depth was 6. Using an increment of 8 for
// these stacks means that clip management will incur a single allocation for the remaining 2%
// of the draws, with extra head room for more complex clips encountered in the wild.
//
// The mask stack increment size was chosen to be smaller since only 0.2% of the evaluated draw call
// set ever used a mask (which includes stencil masks), or up to 0.3% when CCPR is disabled.
static constexpr int kElementStackIncrement = 8;
static constexpr int kSaveStackIncrement = 8;
static constexpr int kMaskStackIncrement = 4;

// And from this same draw call set, the most complex clip could only use 5 analytic coverage FPs.
// Historically we limited it to 4 based on Blink's call pattern, so we keep the limit as-is since
// it's so close to the empirically encountered max.
static constexpr int kMaxAnalyticFPs = 4;
// The number of stack-allocated mask pointers to store before extending the arrays.
// Stack size determined empirically, the maximum number of elements put in a SW mask was 4
// across our set of GMs, SKPs, and SVGs used for testing.
static constexpr int kNumStackMasks = 4;
1142
// Creates a clip stack covering 'deviceBounds'. The proxy provider is left null and is filled in
// lazily on the first apply() call; 'forceAA' upgrades every query to anti-aliased.
GrClipStack::GrClipStack(const SkIRect& deviceBounds, const SkMatrixProvider* matrixProvider,
                         bool forceAA)
        : fElements(kElementStackIncrement)
        , fSaves(kSaveStackIncrement)
        , fMasks(kMaskStackIncrement)
        , fProxyProvider(nullptr)
        , fDeviceBounds(deviceBounds)
        , fMatrixProvider(matrixProvider)
        , fForceAA(forceAA) {
    // Start with a save record that is wide open
    fSaves.emplace_back(deviceBounds);
}
1155
1156GrClipStack::~GrClipStack() {
1157 // Invalidate all mask keys that remain. Since we're tearing the clip stack down, we don't need
1158 // to go through SaveRecord.
1159 SkASSERT(fProxyProvider || fMasks.empty());
1160 if (fProxyProvider) {
1161 for (Mask& m : fMasks.ritems()) {
1162 m.invalidate(fProxyProvider);
1163 }
1164 }
1165}
1166
1167void GrClipStack::save() {
1168 SkASSERT(!fSaves.empty());
1169 fSaves.back().pushSave();
1170}
1171
// Undoes the most recent save(). A deferred save simply decrements a counter; otherwise the top
// save record is torn down: its elements and masks are removed, the record is popped, and any
// surviving elements that it alone had invalidated are re-validated.
void GrClipStack::restore() {
    SkASSERT(!fSaves.empty());
    SaveRecord& current = fSaves.back();
    if (current.popSave()) {
        // This was just a deferred save being undone, so the record doesn't need to be removed yet
        return;
    }

    // When we remove a save record, we delete all elements >= its starting index and any masks
    // that were rasterized for it.
    current.removeElements(&fElements);
    SkASSERT(fProxyProvider || fMasks.empty());
    if (fProxyProvider) {
        current.invalidateMasks(fProxyProvider, &fMasks);
    }
    fSaves.pop_back();
    // Restore any remaining elements that were only invalidated by the now-removed save record.
    fSaves.back().restoreElements(&fElements);
}
1191
1192SkIRect GrClipStack::getConservativeBounds() const {
1193 const SaveRecord& current = this->currentSaveRecord();
1194 if (current.state() == ClipState::kEmpty) {
1195 return SkIRect::MakeEmpty();
1196 } else if (current.state() == ClipState::kWideOpen) {
1197 return fDeviceBounds;
1198 } else {
1199 if (current.op() == SkClipOp::kDifference) {
1200 // The outer/inner bounds represent what's cut out, so full bounds remains the device
1201 // bounds, minus any fully clipped content that spans the device edge.
1202 return subtract(fDeviceBounds, current.innerBounds(), /* exact */ true);
1203 } else {
1204 SkASSERT(fDeviceBounds.contains(current.outerBounds()));
1205 return current.outerBounds();
1206 }
1207 }
1208}
1209
// Cheap, conservative classification of how the clip affects 'bounds' without building any
// fragment processors or masks. May return the single device rect/rrect when the whole stack
// reduces to one, so callers can apply it themselves.
GrClip::PreClipResult GrClipStack::preApply(const SkRect& bounds, GrAA aa) const {
    Draw draw(bounds, fForceAA ? GrAA::kYes : aa);
    if (!draw.applyDeviceBounds(fDeviceBounds)) {
        return GrClip::Effect::kClippedOut;
    }

    const SaveRecord& cs = this->currentSaveRecord();
    // Early out if we know a priori that the clip is full 0s or full 1s.
    if (cs.state() == ClipState::kEmpty) {
        return GrClip::Effect::kClippedOut;
    } else if (cs.state() == ClipState::kWideOpen) {
        SkASSERT(!cs.shader());
        return GrClip::Effect::kUnclipped;
    }

    // Given argument order, 'A' == current clip, 'B' == draw
    switch (get_clip_geometry(cs, draw)) {
        case ClipGeometry::kEmpty:
            // Can ignore the shader since the geometry removed everything already
            return GrClip::Effect::kClippedOut;

        case ClipGeometry::kBOnly:
            // Geometrically, the draw is unclipped, but can't ignore a shader
            return cs.shader() ? GrClip::Effect::kClipped : GrClip::Effect::kUnclipped;

        case ClipGeometry::kAOnly:
            // Shouldn't happen since the inner bounds of a draw are unknown
            SkASSERT(false);
            // But if it did, it technically means the draw covered the clip and should be
            // considered kClipped or similar, which is what the next case handles.
            [[fallthrough]];

        case ClipGeometry::kBoth: {
            SkASSERT(fElements.count() > 0);
            const RawElement& back = fElements.back();
            if (cs.state() == ClipState::kDeviceRect) {
                SkASSERT(back.clipType() == ClipState::kDeviceRect);
                return {back.shape().rect(), back.aa()};
            } else if (cs.state() == ClipState::kDeviceRRect) {
                SkASSERT(back.clipType() == ClipState::kDeviceRRect);
                return {back.shape().rrect(), back.aa()};
            } else {
                // The clip stack has complex shapes, multiple elements, or a shader; we could
                // iterate per element like we would in apply(), but preApply() is meant to be
                // conservative and efficient.
                SkASSERT(cs.state() == ClipState::kComplex);
                return GrClip::Effect::kClipped;
            }
        }
    }

    SkUNREACHABLE;
}
1263
// Applies the current clip to a draw over '*bounds', filling 'out' with the scissor, window
// rectangles, and coverage FPs needed. Elements are consumed in priority order: HW scissor and
// window rects first, then analytic coverage FPs (up to kMaxAnalyticFPs), then the CCPR atlas,
// and finally a flattened stencil or SW mask for whatever remains. '*bounds' is tightened to the
// device bounds as a side effect.
GrClip::Effect GrClipStack::apply(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
                                  GrAAType aa, bool hasUserStencilSettings,
                                  GrAppliedClip* out, SkRect* bounds) const {
    // TODO: Once we no longer store SW masks, we don't need to sneak the provider in like this
    if (!fProxyProvider) {
        fProxyProvider = context->priv().proxyProvider();
    }
    SkASSERT(fProxyProvider == context->priv().proxyProvider());
    const GrCaps* caps = context->priv().caps();

    // Convert the bounds to a Draw and apply device bounds clipping, making our query as tight
    // as possible.
    Draw draw(*bounds, GrAA(fForceAA || aa != GrAAType::kNone));
    if (!draw.applyDeviceBounds(fDeviceBounds)) {
        return Effect::kClippedOut;
    }
    SkAssertResult(bounds->intersect(SkRect::Make(fDeviceBounds)));

    const SaveRecord& cs = this->currentSaveRecord();
    // Early out if we know a priori that the clip is full 0s or full 1s.
    if (cs.state() == ClipState::kEmpty) {
        return Effect::kClippedOut;
    } else if (cs.state() == ClipState::kWideOpen) {
        SkASSERT(!cs.shader());
        return Effect::kUnclipped;
    }

    // Convert any clip shader first, since it's not geometrically related to the draw bounds
    std::unique_ptr<GrFragmentProcessor> clipFP = nullptr;
    if (cs.shader()) {
        static const GrColorInfo kCoverageColorInfo{GrColorType::kUnknown, kPremul_SkAlphaType,
                                                    nullptr};
        GrFPArgs args(context, *fMatrixProvider, &kCoverageColorInfo);
        clipFP = as_SB(cs.shader())->asFragmentProcessor(args);
        if (clipFP) {
            // The initial input is the coverage from the geometry processor, so this ensures it
            // is multiplied properly with the alpha of the clip shader.
            clipFP = GrFragmentProcessor::MulInputByChildAlpha(std::move(clipFP));
        }
    }

    // A refers to the entire clip stack, B refers to the draw
    switch (get_clip_geometry(cs, draw)) {
        case ClipGeometry::kEmpty:
            return Effect::kClippedOut;

        case ClipGeometry::kBOnly:
            // Geometrically unclipped, but may need to add the shader as a coverage FP
            if (clipFP) {
                out->addCoverageFP(std::move(clipFP));
                return Effect::kClipped;
            } else {
                return Effect::kUnclipped;
            }

        case ClipGeometry::kAOnly:
            // Shouldn't happen since draws don't report inner bounds
            SkASSERT(false);
            [[fallthrough]];

        case ClipGeometry::kBoth:
            // The draw is combined with the saved clip elements; the below logic tries to skip
            // as many elements as possible.
            SkASSERT(cs.state() == ClipState::kDeviceRect ||
                     cs.state() == ClipState::kDeviceRRect ||
                     cs.state() == ClipState::kComplex);
            break;
    }

    // We can determine a scissor based on the draw and the overall stack bounds.
    SkIRect scissorBounds;
    if (cs.op() == SkClipOp::kIntersect) {
        // Initially we keep this as large as possible; if the clip is applied solely with coverage
        // FPs then using a loose scissor increases the chance we can batch the draws.
        // We tighten it later if any form of mask or atlas element is needed.
        scissorBounds = cs.outerBounds();
    } else {
        scissorBounds = subtract(draw.outerBounds(), cs.innerBounds(), /* exact */ true);
    }

    // We mark this true once we have a coverage FP (since complex clipping is occurring), or we
    // have an element that wouldn't affect the scissored draw bounds, but does affect the regular
    // draw bounds. In that case, the scissor is sufficient for clipping and we can skip the
    // element but definitely cannot then drop the scissor.
    bool scissorIsNeeded = SkToBool(cs.shader());

    int remainingAnalyticFPs = kMaxAnalyticFPs;
    if (hasUserStencilSettings) {
        // Disable analytic clips when there are user stencil settings to ensure the clip is
        // respected in the stencil buffer.
        remainingAnalyticFPs = 0;
        // If we have user stencil settings, stencil needs to be supported.
        SkASSERT(rtc->asRenderTargetProxy()->canUseStencil(*context->priv().caps()));
    }

    // If window rectangles are supported, we can use them to exclude inner bounds of difference ops
    int maxWindowRectangles = rtc->maxWindowRectangles();
    GrWindowRectangles windowRects;

    // Elements not represented as an analytic FP or skipped will be collected here and later
    // applied by using the stencil buffer, CCPR clip atlas, or a cached SW mask.
    SkSTArray<kNumStackMasks, const Element*> elementsForMask;
    SkSTArray<kNumStackMasks, const RawElement*> elementsForAtlas;

    bool maskRequiresAA = false;
    auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();

    // Walk the still-valid elements newest-to-oldest, classifying how each one must be applied.
    int i = fElements.count();
    for (const RawElement& e : fElements.ritems()) {
        --i;
        if (i < cs.oldestElementIndex()) {
            // All earlier elements have been invalidated by elements already processed
            break;
        } else if (e.isInvalid()) {
            continue;
        }

        switch (get_clip_geometry(e, draw)) {
            case ClipGeometry::kEmpty:
                // This can happen for difference op elements that have a larger fInnerBounds than
                // can be preserved at the next level.
                return Effect::kClippedOut;

            case ClipGeometry::kBOnly:
                // We don't need to produce a coverage FP or mask for the element
                break;

            case ClipGeometry::kAOnly:
                // Shouldn't happen for draws, fall through to regular element processing
                SkASSERT(false);
                [[fallthrough]];

            case ClipGeometry::kBoth: {
                // The element must apply coverage to the draw, enable the scissor to limit overdraw
                scissorIsNeeded = true;

                // First apply using HW methods (scissor and window rects). When the inner and outer
                // bounds match, nothing else needs to be done.
                bool fullyApplied = false;
                if (e.op() == SkClipOp::kIntersect) {
                    // The second test allows clipped draws that are scissored by multiple elements
                    // to remain scissor-only.
                    fullyApplied = e.innerBounds() == e.outerBounds() ||
                                   e.innerBounds().contains(scissorBounds);
                } else {
                    if (!e.innerBounds().isEmpty() && windowRects.count() < maxWindowRectangles) {
                        // TODO: If we have more difference ops than available window rects, we
                        // should prioritize those with the largest inner bounds.
                        windowRects.addWindow(e.innerBounds());
                        fullyApplied = e.innerBounds() == e.outerBounds();
                    }
                }

                if (!fullyApplied && remainingAnalyticFPs > 0) {
                    std::tie(fullyApplied, clipFP) = analytic_clip_fp(e.asElement(),
                                                                      *caps->shaderCaps(),
                                                                      std::move(clipFP));
                    if (fullyApplied) {
                        remainingAnalyticFPs--;
                    } else if (ccpr && e.aa() == GrAA::kYes) {
                        // Fall back to the CCPR clip atlas if the element is AA and small enough.
                        constexpr static int64_t kMaxClipPathArea =
                                GrCoverageCountingPathRenderer::kMaxClipPathArea;
                        SkIRect maskBounds;
                        if (maskBounds.intersect(e.outerBounds(), draw.outerBounds()) &&
                            maskBounds.height64() * maskBounds.width64() < kMaxClipPathArea) {
                            // While technically the element is turned into a mask, each atlas entry
                            // counts towards the FP complexity of the clip.
                            // TODO - CCPR needs a stable ops task ID so we can't create FPs until
                            // we know any other mask generation is finished. It also only works
                            // with AA shapes, future atlas systems can improve on this.
                            elementsForAtlas.push_back(&e);
                            remainingAnalyticFPs--;
                            fullyApplied = true;
                        }
                    }
                }

                if (!fullyApplied) {
                    elementsForMask.push_back(&e.asElement());
                    maskRequiresAA |= (e.aa() == GrAA::kYes);
                }

                break;
            }
        }
    }

    if (!scissorIsNeeded) {
        // More detailed analysis of the element shapes determined no clip is needed
        SkASSERT(elementsForMask.empty() && elementsForAtlas.empty() && !clipFP);
        return Effect::kUnclipped;
    }

    // Fill out the GrAppliedClip with what we know so far, possibly with a tightened scissor
    if (cs.op() == SkClipOp::kIntersect &&
        (!elementsForMask.empty() || !elementsForAtlas.empty())) {
        SkAssertResult(scissorBounds.intersect(draw.outerBounds()));
    }
    if (!GrClip::IsInsideClip(scissorBounds, *bounds)) {
        out->hardClip().addScissor(scissorBounds, bounds);
    }
    if (!windowRects.empty()) {
        out->hardClip().addWindowRectangles(windowRects, GrWindowRectsState::Mode::kExclusive);
    }

    // Now rasterize any remaining elements, either to the stencil or a SW mask. All elements are
    // flattened into a single mask.
    if (!elementsForMask.empty()) {
        bool stencilUnavailable =
                !rtc->asRenderTargetProxy()->canUseStencil(*context->priv().caps());

        bool hasSWMask = false;
        if ((rtc->numSamples() <= 1 && maskRequiresAA) || stencilUnavailable) {
            // Must use a texture mask to represent the combined clip elements since the stencil
            // cannot be used, or cannot handle smooth clips.
            std::tie(hasSWMask, clipFP) = GetSWMaskFP(
                    context, &fMasks, cs, scissorBounds, elementsForMask.begin(),
                    elementsForMask.count(), std::move(clipFP));
        }

        if (!hasSWMask) {
            if (stencilUnavailable) {
                SkDebugf("WARNING: Clip mask requires stencil, but stencil unavailable. "
                         "Draw will be ignored.\n");
                return Effect::kClippedOut;
            } else {
                // Rasterize the remaining elements to the stencil buffer
                render_stencil_mask(context, rtc, cs.genID(), scissorBounds,
                                    elementsForMask.begin(), elementsForMask.count(), out);
            }
        }
    }

    // Finish CCPR paths now that the render target's ops task is stable.
    if (!elementsForAtlas.empty()) {
        uint32_t opsTaskID = rtc->getOpsTask()->uniqueID();
        for (int i = 0; i < elementsForAtlas.count(); ++i) {
            SkASSERT(elementsForAtlas[i]->aa() == GrAA::kYes);
            bool success;
            std::tie(success, clipFP) = clip_atlas_fp(ccpr, opsTaskID, scissorBounds,
                                                      elementsForAtlas[i]->asElement(),
                                                      elementsForAtlas[i]->devicePath(), *caps,
                                                      std::move(clipFP));
            if (!success) {
                return Effect::kClippedOut;
            }
        }
    }

    if (clipFP) {
        // This will include all analytic FPs, all CCPR atlas FPs, and a SW mask FP.
        out->addCoverageFP(std::move(clipFP));
    }

    SkASSERT(out->doesClip());
    return Effect::kClipped;
}
1521
1522GrClipStack::SaveRecord& GrClipStack::writableSaveRecord(bool* wasDeferred) {
1523 SaveRecord& current = fSaves.back();
1524 if (current.canBeUpdated()) {
1525 // Current record is still open, so it can be modified directly
1526 *wasDeferred = false;
1527 return current;
1528 } else {
1529 // Must undefer the save to get a new record.
1530 SkAssertResult(current.popSave());
1531 *wasDeferred = true;
1532 return fSaves.emplace_back(current, fMasks.count(), fElements.count());
1533 }
1534}
1535
1536void GrClipStack::clipShader(sk_sp<SkShader> shader) {
1537 // Shaders can't bring additional coverage
1538 if (this->currentSaveRecord().state() == ClipState::kEmpty) {
1539 return;
1540 }
1541
1542 bool wasDeferred;
1543 this->writableSaveRecord(&wasDeferred).addShader(std::move(shader));
1544 // Masks and geometry elements are not invalidated by updating the clip shader
1545}
1546
1547void GrClipStack::replaceClip(const SkIRect& rect) {
1548 bool wasDeferred;
1549 SaveRecord& save = this->writableSaveRecord(&wasDeferred);
1550
1551 if (!wasDeferred) {
1552 save.removeElements(&fElements);
1553 save.invalidateMasks(fProxyProvider, &fMasks);
1554 }
1555
1556 save.reset(fDeviceBounds);
1557 if (rect != fDeviceBounds) {
1558 this->clipRect(SkMatrix::I(), SkRect::Make(rect), GrAA::kNo, SkClipOp::kIntersect);
1559 }
1560}
1561
// Applies a new clip element to the stack, simplifying it first and updating (or creating) the
// writable save record to reflect the element's net effect on the clip state.
void GrClipStack::clip(RawElement&& element) {
    // An empty clip cannot be narrowed further; nothing to record.
    if (this->currentSaveRecord().state() == ClipState::kEmpty) {
        return;
    }

    // Reduce the path to anything simpler, will apply the transform if it's a scale+translate
    // and ensures the element's bounds are clipped to the device (NOT the conservative clip bounds,
    // since those are based on the net effect of all elements while device bounds clipping happens
    // implicitly. During addElement, we may still be able to invalidate some older elements).
    element.simplify(fDeviceBounds, fForceAA);
    SkASSERT(!element.shape().inverted());

    // An empty op means do nothing (for difference), or close the save record, so we try and detect
    // that early before doing additional unnecessary save record allocation.
    if (element.shape().isEmpty()) {
        if (element.op() == SkClipOp::kDifference) {
            // If the shape is empty and we're subtracting, this has no effect on the clip
            return;
        }
        // else we will make the clip empty, but we need a new save record to record that change
        // in the clip state; fall through to below and updateForElement() will handle it.
    }

    bool wasDeferred;
    SaveRecord& save = this->writableSaveRecord(&wasDeferred);
    // Debug-only snapshots used to validate gen-ID and element-count invariants below.
    SkDEBUGCODE(uint32_t oldGenID = save.genID();)
    SkDEBUGCODE(int elementCount = fElements.count();)
    if (!save.addElement(std::move(element), &fElements)) {
        if (wasDeferred) {
            // We made a new save record, but ended up not adding an element to the stack.
            // So instead of keeping an empty save record around, pop it off and restore the counter
            SkASSERT(elementCount == fElements.count());
            fSaves.pop_back();
            fSaves.back().pushSave();
        } else {
            // Should not have changed gen ID if the element and save were not modified
            SkASSERT(oldGenID == save.genID());
        }
    } else {
        // The gen ID should be new, and should not be invalid
        SkASSERT(oldGenID != save.genID() && save.genID() != kInvalidGenID);
        if (fProxyProvider && !wasDeferred) {
            // We modified an active save record so any old masks it had can be invalidated
            save.invalidateMasks(fProxyProvider, &fMasks);
        }
    }
}
1609
// Produces a fragment processor that applies a software-rasterized clip mask for 'elements',
// reusing a cached mask from 'masks' when one is compatible with the current save record and
// draw bounds, and rendering + caching a new one otherwise. The returned FP multiplies the
// mask's coverage into 'clipFP' (kDstIn). On failure the original 'clipFP' is returned via
// GrFPFailure so the caller can fall back.
GrFPResult GrClipStack::GetSWMaskFP(GrRecordingContext* context, Mask::Stack* masks,
                                    const SaveRecord& current, const SkIRect& bounds,
                                    const Element** elements, int count,
                                    std::unique_ptr<GrFragmentProcessor> clipFP) {
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    GrSurfaceProxyView maskProxy;

    SkIRect maskBounds; // may not be 'bounds' if we reuse a large clip mask
    // Check the existing masks from this save record for compatibility
    for (const Mask& m : masks->ritems()) {
        // Masks are iterated newest-first; once the gen ID no longer matches the current
        // record, none of the older masks can apply either.
        if (m.genID() != current.genID()) {
            break;
        }
        if (m.appliesToDraw(current, bounds)) {
            maskProxy = proxyProvider->findCachedProxyWithColorTypeFallback(
                    m.key(), kMaskOrigin, GrColorType::kAlpha_8, 1);
            if (maskProxy) {
                maskBounds = m.bounds();
                break;
            }
        }
    }

    if (!maskProxy) {
        // No existing mask was found, so need to render a new one
        maskProxy = render_sw_mask(context, bounds, elements, count);
        if (!maskProxy) {
            // If we still don't have one, there's nothing we can do
            return GrFPFailure(std::move(clipFP));
        }

        // Register the mask for later invalidation
        Mask& mask = masks->emplace_back(current, bounds);
        proxyProvider->assignUniqueKeyToProxy(mask.key(), maskProxy.asTextureProxy());
        maskBounds = bounds;
    }

    // Wrap the mask in an FP that samples it for coverage
    SkASSERT(maskProxy && maskProxy.origin() == kMaskOrigin);

    // Nearest filtering with clamp-to-border so coverage outside the mask reads as 0.
    GrSamplerState samplerState(GrSamplerState::WrapMode::kClampToBorder,
                                GrSamplerState::Filter::kNearest);
    // Maps the device coords passed to the texture effect to the top-left corner of the mask, and
    // make sure that the draw bounds are pre-mapped into the mask's space as well.
    auto m = SkMatrix::Translate(-maskBounds.fLeft, -maskBounds.fTop);
    auto subset = SkRect::Make(bounds);
    subset.offset(-maskBounds.fLeft, -maskBounds.fTop);
    // We scissor to bounds. The mask's texel centers are aligned to device space
    // pixel centers. Hence this domain of texture coordinates.
    auto domain = subset.makeInset(0.5, 0.5);
    auto fp = GrTextureEffect::MakeSubset(std::move(maskProxy), kPremul_SkAlphaType, m,
                                          samplerState, subset, domain, *context->priv().caps());
    // The mask is sampled in device space, not local geometry space.
    fp = GrDeviceSpaceEffect::Make(std::move(fp));

    // Must combine the coverage sampled from the texture effect with the previous coverage
    fp = GrBlendFragmentProcessor::Make(std::move(fp), std::move(clipFP), SkBlendMode::kDstIn);
    return GrFPSuccess(std::move(fp));
}