/*
 * Copyright 2016 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkAnalyticEdge.h"
#include "SkAntiRun.h"
#include "SkAutoMalloc.h"
#include "SkBlitter.h"
#include "SkCoverageDelta.h"
#include "SkEdge.h"
#include "SkEdgeBuilder.h"
#include "SkGeometry.h"
#include "SkMask.h"
#include "SkPath.h"
#include "SkQuadClipper.h"
#include "SkRasterClip.h"
#include "SkRegion.h"
#include "SkScan.h"
#include "SkScanPriv.h"
#include "SkTSort.h"
#include "SkTemplates.h"
#include "SkUtils.h"

///////////////////////////////////////////////////////////////////////////////

/*

DAA stands for Delta-based Anti-Aliasing.

This is an improved version of our analytic AA algorithm (in SkScan_AAAPath.cpp):
it first scan-converts paths into coverage deltas (this step can happen out of order,
and we don't need to worry about intersections, clamping, etc.), and then uses a
single blitter run to convert all those deltas into the final alphas.

Before that final blitter run, we use SkFixed for all delta values so we never have
to worry about under/overflow.

*/

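/*
A minimal sketch of the delta idea (illustrative only, not the actual blitter code): the
delta stored at (x, y) is how much the coverage changes going from pixel x-1 to pixel x on
row y, so a pixel's final coverage is the running sum of the deltas at and to the left of
it. Assuming a hypothetical per-row delta buffer, the final conversion is essentially:

    SkFixed coverage = 0;
    for (int x = left; x < right; ++x) {
        coverage += delta[x];                    // accumulate deltas left to right
        alphaRow[x] = CoverageToAlpha(coverage); // hypothetical clamp/convert to SkAlpha
    }

Because the deltas emitted by one edge on one row always sum to that edge's full winding
contribution for the row, deltas from different edges can be accumulated in any order
before this final pass.
*/
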
///////////////////////////////////////////////////////////////////////////////

// The following helper functions are the same as those from SkScan_AAAPath.cpp
// except that we use SkFixed everywhere instead of SkAlpha.

static inline SkFixed SkFixedMul_lowprec(SkFixed a, SkFixed b) {
    return (a >> 8) * (b >> 8);
}
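// For reference (SkFixed is 16.16 fixed point): dropping the low 8 bits of each operand
// keeps the result in the 16.16 scale without a 64-bit intermediate, at the cost of 8 bits
// of precision. E.g. for a = b = 0.5 (0x8000), (0x8000 >> 8) * (0x8000 >> 8) =
// 0x80 * 0x80 = 0x4000, which is 0.25 in 16.16 format, matching SkFixedMul's result.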

// Return the alpha of a trapezoid whose height is 1
static inline SkFixed trapezoidToAlpha(SkFixed l1, SkFixed l2) {
    SkASSERT(l1 >= 0 && l2 >= 0);
    return (l1 + l2) >> 1;
}
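// E.g. a trapezoid with parallel sides l1 = 0.25 and l2 = 0.75 and height 1 has area
// (0.25 + 0.75) / 2 = 0.5, so half of that pixel row is covered.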

// The alpha of right-triangle (a, a*b)
static inline SkFixed partialTriangleToAlpha(SkFixed a, SkFixed b) {
    SkASSERT(a <= SK_Fixed1);
    // SkFixedMul(SkFixedMul(a, a), b) >> 1
    // return ((((a >> 8) * (a >> 8)) >> 8) * (b >> 8)) >> 1;
    return (a >> 11) * (a >> 11) * (b >> 11);
}
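// Derivation: a right triangle with legs a and a*b has area a * (a*b) / 2 = a*a*b / 2.
// In 16.16 fixed point that is (a * a * b) >> 33; (a >> 11) * (a >> 11) * (b >> 11) also
// divides by 2^33, so the shifts compute the same value at lower precision (see the
// commented-out exact formula above).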

static inline SkFixed getPartialAlpha(SkFixed alpha, SkFixed partialMultiplier) {
    // DAA should not be so sensitive to the precision (compared to AAA).
    return SkFixedMul_lowprec(alpha, partialMultiplier);
}

///////////////////////////////////////////////////////////////////////////////

template<bool isPartial, class Deltas>
static inline void add_coverage_delta_segment(int y, SkFixed rowHeight, const SkAnalyticEdge* edge,
                                              SkFixed nextX, Deltas* deltas) { // rowHeight=fullAlpha
    SkASSERT(rowHeight <= SK_Fixed1 && rowHeight >= 0);

    // Let's see if multiplying sign is faster than multiplying edge->fWinding.
    // (Compiler should be able to optimize multiplication with 1/-1?)
    int sign = edge->fWinding == 1 ? 1 : -1;

    SkFixed l = SkTMin(edge->fX, nextX);
    SkFixed r = edge->fX + nextX - l;
    int L = SkFixedFloorToInt(l);
    int R = SkFixedCeilToInt(r);
    int len = R - L;

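    // len is (roughly) how many pixel columns the x-range [l, r] spans on this row; the
    // degenerate case len == 0 happens when l == r lands exactly on a pixel boundary.
    // Whichever case below applies, the deltas emitted for this segment always sum to
    // rowHeight * sign, so every pixel to the right of the segment accumulates the edge's
    // full winding contribution for this row.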
    switch (len) {
        case 0: {
            deltas->addDelta(L, y, rowHeight * sign);
            return;
        }
        case 1: {
            SkFixed fixedR = SkIntToFixed(R);
            SkFixed alpha = trapezoidToAlpha(fixedR - l, fixedR - r);
            if (isPartial) {
                alpha = getPartialAlpha(alpha, rowHeight);
            }
            deltas->addDelta(L, y, alpha * sign);
            deltas->addDelta(L + 1, y, (rowHeight - alpha) * sign);
            return;
        }
        case 2: {
            SkFixed middle = SkIntToFixed(L + 1);
            SkFixed x1 = middle - l;
            SkFixed x2 = r - middle;
            SkFixed alpha1 = partialTriangleToAlpha(x1, edge->fDY);
            SkFixed alpha2 = rowHeight - partialTriangleToAlpha(x2, edge->fDY);
            deltas->addDelta(L, y, alpha1 * sign);
            deltas->addDelta(L + 1, y, (alpha2 - alpha1) * sign);
            deltas->addDelta(L + 2, y, (rowHeight - alpha2) * sign);
            return;
        }
    }

    // When len > 2, computations are similar to computeAlphaAboveLine in SkScan_AAAPath.cpp
    SkFixed dY = edge->fDY;
    SkFixed fixedL = SkIntToFixed(L);
    SkFixed fixedR = SkIntToFixed(R);
    SkFixed first = SK_Fixed1 + fixedL - l; // horizontal edge of the left-most triangle
    SkFixed last = r - (fixedR - SK_Fixed1); // horizontal edge of the right-most triangle
    SkFixed firstH = SkFixedMul_lowprec(first, dY); // vertical edge of the left-most triangle

    SkFixed alpha0 = SkFixedMul_lowprec(first, firstH) >> 1; // triangle alpha
    SkFixed alpha1 = firstH + (dY >> 1); // rectangle plus triangle
    deltas->addDelta(L, y, alpha0 * sign);
    deltas->addDelta(L + 1, y, (alpha1 - alpha0) * sign);
    for (int i = 2; i < len - 1; ++i) {
        deltas->addDelta(L + i, y, dY * sign); // the increment is always a rect of height = dY
    }

    SkFixed alphaR2 = alpha1 + dY * (len - 3);                        // the alpha at R - 2
    SkFixed lastAlpha = rowHeight - partialTriangleToAlpha(last, dY); // the alpha at R - 1
    deltas->addDelta(R - 1, y, (lastAlpha - alphaR2) * sign);
    deltas->addDelta(R, y, (rowHeight - lastAlpha) * sign);
}

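// Comparators for SkTQSort: order beziers by the sum of their first two control points'
// x (or y) coordinates, a cheap proxy for left-to-right (or top-to-bottom) order.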
class XLessThan {
public:
    bool operator()(const SkBezier* a, const SkBezier* b) {
        return a->fP0.fX + a->fP1.fX < b->fP0.fX + b->fP1.fX;
    }
};

class YLessThan {
public:
    bool operator()(const SkBezier* a, const SkBezier* b) {
        return a->fP0.fY + a->fP1.fY < b->fP0.fY + b->fP1.fY;
    }
};

template<class Deltas> static SK_ALWAYS_INLINE
void gen_alpha_deltas(const SkPath& path, const SkIRect& clippedIR, const SkIRect& clipBounds,
                      Deltas& result, SkBlitter* blitter, bool skipRect, bool pathContainedInClip) {
    // 1. Build edges
    SkEdgeBuilder builder;
    // We have to use clipBounds instead of clippedIR to build edges because of "canCullToTheRight":
    // if the builder finds a right edge past the right clip, it won't build that right edge.
    int count = builder.build_edges(path, &clipBounds, 0, pathContainedInClip,
                                    SkEdgeBuilder::kBezier);

    if (count == 0) {
        return;
    }
    SkBezier** list = builder.bezierList();

    // 2. Try to find the rect part because blitAntiRect is so much faster than blitCoverageDeltas
    int rectTop = clippedIR.fBottom; // the rect is initialized to be empty as top = bot
    int rectBot = clippedIR.fBottom;
    if (skipRect) { // only look for the rect if skipRect == true
        YLessThan lessThan; // sort edges in YX order
        SkTQSort(list, list + count - 1, lessThan);
        for (int i = 0; i < count - 1; ++i) {
            SkBezier* lb = list[i];
            SkBezier* rb = list[i + 1];

            // fCount == 2 ensures that lb and rb are lines instead of quads or cubics.
            bool lDX0 = lb->fP0.fX == lb->fP1.fX && lb->fCount == 2;
            bool rDX0 = rb->fP0.fX == rb->fP1.fX && rb->fCount == 2;
            if (!lDX0 || !rDX0) { // make sure that the edges are vertical
                continue;
            }

            SkAnalyticEdge l, r;
            l.setLine(lb->fP0, lb->fP1);
            r.setLine(rb->fP0, rb->fP1);

            SkFixed xorUpperY = l.fUpperY ^ r.fUpperY;
            SkFixed xorLowerY = l.fLowerY ^ r.fLowerY;
            if ((xorUpperY | xorLowerY) == 0) { // equal upperY and lowerY
                rectTop = SkFixedCeilToInt(l.fUpperY);
                rectBot = SkFixedFloorToInt(l.fLowerY);
                if (rectBot > rectTop) { // if bot == top, the rect is too short for blitAntiRect
                    int L = SkFixedCeilToInt(l.fUpperX);
                    int R = SkFixedFloorToInt(r.fUpperX);
                    if (L <= R) {
                        SkAlpha la = (SkIntToFixed(L) - l.fUpperX) >> 8;
                        SkAlpha ra = (r.fUpperX - SkIntToFixed(R)) >> 8;
                        result.setAntiRect(L - 1, rectTop, R - L, rectBot - rectTop, la, ra);
                    } else { // too thin to use blitAntiRect; reset the rect region to be empty
                        rectTop = rectBot = clippedIR.fBottom;
                    }
                }
                break;
            }

        }
    }


    // 3. Sort edges in x so the deltas we generate are more likely to already be sorted in x.
    // This only helps SkCoverageDeltaList. And we don't want to sort more than SORT_THRESHOLD
    // edges, where the log(count) factor of the quick sort may become a bottleneck; with that
    // many edges, the deltas are unlikely to end up sorted anyway.
    constexpr int SORT_THRESHOLD = 256;
    if (std::is_same<Deltas, SkCoverageDeltaList>::value && count < SORT_THRESHOLD) {
        XLessThan lessThan;
        SkTQSort(list, list + count - 1, lessThan);
    }

    // Future todo: parallelize and SIMD the following code.
    // 4. Iterate through edges and generate deltas
    for (int index = 0; index < count; ++index) {
        SkAnalyticCubicEdge storage;
        SkASSERT(sizeof(SkAnalyticQuadraticEdge) >= sizeof(SkAnalyticEdge));
        SkASSERT(sizeof(SkAnalyticCubicEdge) >= sizeof(SkAnalyticQuadraticEdge));

        SkBezier* bezier = list[index];
        SkAnalyticEdge* currE = &storage;
        bool edgeSet = false;

        int originalWinding = 1;
        bool sortY = true;
        switch (bezier->fCount) {
            case 2: {
                edgeSet = currE->setLine(bezier->fP0, bezier->fP1);
                originalWinding = currE->fWinding;
                break;
            }
            case 3: {
                SkQuad* quad = static_cast<SkQuad*>(bezier);
                SkPoint pts[3] = {quad->fP0, quad->fP1, quad->fP2};
                edgeSet = static_cast<SkAnalyticQuadraticEdge*>(currE)->setQuadratic(pts);
                originalWinding = static_cast<SkAnalyticQuadraticEdge*>(currE)->fQEdge.fWinding;
                break;
            }
            case 4: {
                sortY = false;
                SkCubic* cubic = static_cast<SkCubic*>(bezier);
                SkPoint pts[4] = {cubic->fP0, cubic->fP1, cubic->fP2, cubic->fP3};
                edgeSet = static_cast<SkAnalyticCubicEdge*>(currE)->setCubic(pts, sortY);
                originalWinding = static_cast<SkAnalyticCubicEdge*>(currE)->fCEdge.fWinding;
                break;
            }
        }

        if (!edgeSet) {
            continue;
        }

        do {
            currE->fX = currE->fUpperX;

            SkFixed upperFloor = SkFixedFloorToFixed(currE->fUpperY);
            SkFixed lowerCeil = SkFixedCeilToFixed(currE->fLowerY);
            int iy = SkFixedFloorToInt(upperFloor);

            if (lowerCeil <= upperFloor + SK_Fixed1) { // only one row is affected by currE
                SkFixed rowHeight = currE->fLowerY - currE->fUpperY;
                SkFixed nextX = currE->fX + SkFixedMul(currE->fDX, rowHeight);
                if (iy >= clippedIR.fTop && iy < clippedIR.fBottom) {
                    add_coverage_delta_segment<true>(iy, rowHeight, currE, nextX, &result);
                }
                continue;
            }

            // check the first row
            SkFixed rowHeight = upperFloor + SK_Fixed1 - currE->fUpperY;
            SkFixed nextX;
            if (rowHeight != SK_Fixed1) { // it's a partial row
                nextX = currE->fX + SkFixedMul(currE->fDX, rowHeight);
                add_coverage_delta_segment<true>(iy, rowHeight, currE, nextX, &result);
            } else { // it's a full row so we can leave it to the while loop
                iy--; // compensate for the iy++ in the while loop
                nextX = currE->fX;
            }

            while (true) { // process the full rows in the middle
                iy++;
                SkFixed y = SkIntToFixed(iy);
                currE->fX = nextX;
                nextX += currE->fDX;

                if (y + SK_Fixed1 > currE->fLowerY) {
                    break; // no full rows left
                }

                // Check whether we're in the rect part that will be covered by blitAntiRect
                if (iy >= rectTop && iy < rectBot) {
                    SkASSERT(currE->fDX == 0); // If yes, we must be on an edge with fDX == 0.
                    iy = rectBot - 1; // Skip the rect part by advancing iy to its bottom.
                    continue;
                }

                // Add the current edge's coverage deltas on this full row
                add_coverage_delta_segment<false>(iy, SK_Fixed1, currE, nextX, &result);
            }

            // last partial row
            if (SkIntToFixed(iy) < currE->fLowerY &&
                iy >= clippedIR.fTop && iy < clippedIR.fBottom) {
                rowHeight = currE->fLowerY - SkIntToFixed(iy);
                nextX = currE->fX + SkFixedMul(currE->fDX, rowHeight);
                add_coverage_delta_segment<true>(iy, rowHeight, currE, nextX, &result);
            }
        // The assignment to fWinding is intentional: it restores the winding that may have been
        // negated during updateLine.
        } while ((currE->fWinding = originalWinding) && currE->update(currE->fLowerY, sortY));
    }
}

void SkScan::DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
                         const SkIRect& clipBounds, bool forceRLE, SkDAARecord* record) {
    bool containedInClip = clipBounds.contains(ir);
    bool isEvenOdd = path.getFillType() & 1;
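    // (The & 1 relies on the even-odd fill types having odd enum values:
    // kEvenOdd_FillType and kInverseEvenOdd_FillType.)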
    bool isConvex = path.isConvex();
    bool isInverse = path.isInverseFillType();
    bool skipRect = isConvex && !isInverse;
    bool isInitOnce = record && record->fType == SkDAARecord::Type::kToBeComputed;

    SkIRect clippedIR = ir;
    clippedIR.intersect(clipBounds);

    // Even just constructing an SkCoverageDeltaList/Mask has too much overhead, so call
    // TryBlitFatAntiRect first and return if it succeeds.
    if (!isInverse && TryBlitFatAntiRect(blitter, path, clipBounds)) {
        SkDAARecord::SetEmpty(record);
        return;
    }

#ifdef SK_BUILD_FOR_GOOGLE3
    constexpr int STACK_SIZE = 12 << 10; // 12K stack size alloc; Google3 has 16K limit.
#else
    constexpr int STACK_SIZE = 64 << 10; // 64K stack size to avoid heap allocation
#endif
    SkSTArenaAlloc<STACK_SIZE> stackAlloc; // avoid heap allocation with SkSTArenaAlloc

    // Set alloc to the record's alloc if and only if we're in the init-once phase. We have to do
    // that during the init phase because the mask or list needs to live longer. We can't do it
    // during the blit phase because the same record could be accessed by multiple threads
    // simultaneously.
    SkArenaAlloc* alloc = isInitOnce ? record->fAlloc : &stackAlloc;

    if (record == nullptr) {
        record = alloc->make<SkDAARecord>(alloc);
    }

    // Only the blitter->blitXXX calls need to be done in order in the threaded backend;
    // everything else can be done out of order in the init-once phase. We achieve that by
    // calling DAAFillPath twice: first with a null blitter, and then with the real blitter
    // and the SkMask/SkCoverageDeltaList generated in the first step.
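    // A rough sketch of that two-phase calling pattern (hypothetical caller code, assuming
    // the record starts out as Type::kToBeComputed):
    //
    //     SkScan::DAAFillPath(path, nullptr, ir, clipBounds, forceRLE, &record); // init once
    //     // ... later, in draw order, possibly on another thread ...
    //     SkScan::DAAFillPath(path, blitter, ir, clipBounds, forceRLE, &record); // blit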
    if (record->fType == SkDAARecord::Type::kToBeComputed) {
        if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) {
            record->fType = SkDAARecord::Type::kMask;
            SkCoverageDeltaMask deltaMask(alloc, clippedIR);
            gen_alpha_deltas(path, clippedIR, clipBounds, deltaMask, blitter, skipRect,
                             containedInClip);
            deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex);
            record->fMask = deltaMask.prepareSkMask();
        } else {
            record->fType = SkDAARecord::Type::kList;
            SkCoverageDeltaList* deltaList = alloc->make<SkCoverageDeltaList>(
                    alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE);
            gen_alpha_deltas(path, clippedIR, clipBounds, *deltaList, blitter, skipRect,
                             containedInClip);
            record->fList = deltaList;
        }
    }

    if (!isInitOnce) {
        SkASSERT(record->fType != SkDAARecord::Type::kToBeComputed);
        if (record->fType == SkDAARecord::Type::kMask) {
            blitter->blitMask(record->fMask, clippedIR);
        } else {
            blitter->blitCoverageDeltas(record->fList,
                                        clipBounds, isEvenOdd, isInverse, isConvex, alloc);
        }
    }
}