/*
 * Copyright 2016 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkAnalyticEdge.h"
#include "SkAntiRun.h"
#include "SkAutoMalloc.h"
#include "SkBlitter.h"
#include "SkCoverageDelta.h"
#include "SkEdge.h"
#include "SkEdgeBuilder.h"
#include "SkGeometry.h"
#include "SkMask.h"
#include "SkPath.h"
#include "SkQuadClipper.h"
#include "SkRasterClip.h"
#include "SkRegion.h"
#include "SkScan.h"
#include "SkScanPriv.h"
#include "SkTSort.h"
#include "SkTemplates.h"
#include "SkUtils.h"

///////////////////////////////////////////////////////////////////////////////

/*

DAA stands for Delta-based Anti-Aliasing.

This is an improved version of our analytic AA algorithm (in SkScan_AAAPath.cpp).
It first scan converts paths into coverage deltas (this step can happen out of order,
and we don't need to worry about intersection, clamping, etc. here),
and then uses a single blitter run to convert all those deltas into the final alphas.

Before we get to the final blitter run, we use SkFixed for all delta values so we
never have to worry about under/overflow.

*/
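
// A minimal sketch (not part of the implementation below) of how those deltas get consumed:
// each delta records how much the coverage changes at a given column, so a single
// left-to-right running sum per row recovers per-pixel coverage. Assuming a hypothetical
// row[] of SkFixed deltas for one scanline:
//
//     SkFixed coverage = 0;
//     for (int x = 0; x < width; ++x) {
//         coverage += row[x];   // running sum of this row's deltas
//         // coverage is now the signed area for pixel x; the real blitter run additionally
//         // applies the even-odd/winding rule and clamps before converting to SkAlpha.
//     }
//
// For example, a +1-winding edge crossing a full-height row at x = 0.5 contributes
// +SK_Fixed1/2 at column 0 and +SK_Fixed1/2 at column 1: pixel 0 ends up half covered and
// everything to its right fully covered, until a matching negative delta from the opposite
// edge is summed in.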

///////////////////////////////////////////////////////////////////////////////

// The following helper functions are the same as those from SkScan_AAAPath.cpp
// except that we use SkFixed everywhere instead of SkAlpha.

static inline SkFixed SkFixedMul_lowprec(SkFixed a, SkFixed b) {
    return (a >> 8) * (b >> 8);
}
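// E.g. with a = b = SK_Fixed1 (65536), (a >> 8) * (b >> 8) = 256 * 256 = SK_Fixed1: the result
// stays in 16.16 fixed point, at the cost of dropping the low 8 bits of each operand.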

// Return the alpha of a trapezoid whose height is 1
static inline SkFixed trapezoidToAlpha(SkFixed l1, SkFixed l2) {
    SkASSERT(l1 >= 0 && l2 >= 0);
    return (l1 + l2) >> 1;
}
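// E.g. trapezoidToAlpha(SK_Fixed1, 0) = SK_Fixed1 / 2: a triangle spanning the full pixel width
// contributes half coverage, matching the trapezoid area (l1 + l2) * height / 2 with height = 1.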

// The alpha of a right triangle with legs a and a*b
static inline SkFixed partialTriangleToAlpha(SkFixed a, SkFixed b) {
    SkASSERT(a <= SK_Fixed1);
    // SkFixedMul(SkFixedMul(a, a), b) >> 1
    // return ((((a >> 8) * (a >> 8)) >> 8) * (b >> 8)) >> 1;
    return (a >> 11) * (a >> 11) * (b >> 11);
}
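// The low-precision product above approximates the commented full-precision formula:
// (a >> 11) * (a >> 11) * (b >> 11) ~= (a * a * b) >> 33 = SkFixedMul(SkFixedMul(a, a), b) >> 1,
// i.e. the right triangle's area a * (a * b) / 2, kept in 16.16 fixed point.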

static inline SkFixed getPartialAlpha(SkFixed alpha, SkFixed partialMultiplier) {
    // DAA is not as sensitive to precision as AAA, so the low-precision multiply is fine here.
    return SkFixedMul_lowprec(alpha, partialMultiplier);
}

///////////////////////////////////////////////////////////////////////////////

template<bool isPartial, class Deltas>
static inline void add_coverage_delta_segment(int y, SkFixed rowHeight, const SkAnalyticEdge* edge,
                                              SkFixed nextX, Deltas* deltas) { // rowHeight=fullAlpha
    SkASSERT(rowHeight <= SK_Fixed1 && rowHeight >= 0);

    // Let's see if multiplying by sign is faster than multiplying by edge->fWinding.
    // (The compiler should be able to optimize multiplication by 1/-1.)
    int sign = edge->fWinding == 1 ? 1 : -1;

    SkFixed l = SkTMin(edge->fX, nextX);
    SkFixed r = edge->fX + nextX - l;
    int L = SkFixedFloorToInt(l);
    int R = SkFixedCeilToInt(r);
    int len = R - L;

    switch (len) {
        case 0: {
            deltas->addDelta(L, y, rowHeight * sign);
            return;
        }
        case 1: {
            SkFixed fixedR = SkIntToFixed(R);
            SkFixed alpha = trapezoidToAlpha(fixedR - l, fixedR - r);
            if (isPartial) {
                alpha = getPartialAlpha(alpha, rowHeight);
            }
            deltas->addDelta(L, y, alpha * sign);
            deltas->addDelta(L + 1, y, (rowHeight - alpha) * sign);
            return;
        }
        case 2: {
            SkFixed middle = SkIntToFixed(L + 1);
            SkFixed x1 = middle - l;
            SkFixed x2 = r - middle;
            SkFixed alpha1 = partialTriangleToAlpha(x1, edge->fDY);
            SkFixed alpha2 = rowHeight - partialTriangleToAlpha(x2, edge->fDY);
            deltas->addDelta(L, y, alpha1 * sign);
            deltas->addDelta(L + 1, y, (alpha2 - alpha1) * sign);
            deltas->addDelta(L + 2, y, (rowHeight - alpha2) * sign);
            return;
        }
    }

    // When len > 2, the computations are similar to computeAlphaAboveLine in SkScan_AAAPath.cpp
    SkFixed dY = edge->fDY;
    SkFixed fixedL = SkIntToFixed(L);
    SkFixed fixedR = SkIntToFixed(R);
    SkFixed first = SK_Fixed1 + fixedL - l;         // horizontal edge of the left-most triangle
    SkFixed last = r - (fixedR - SK_Fixed1);        // horizontal edge of the right-most triangle
    SkFixed firstH = SkFixedMul_lowprec(first, dY); // vertical edge of the left-most triangle

    SkFixed alpha0 = SkFixedMul_lowprec(first, firstH) >> 1; // triangle alpha
    SkFixed alpha1 = firstH + (dY >> 1);                     // rectangle plus triangle
    deltas->addDelta(L, y, alpha0 * sign);
    deltas->addDelta(L + 1, y, (alpha1 - alpha0) * sign);
    for (int i = 2; i < len - 1; ++i) {
        deltas->addDelta(L + i, y, dY * sign); // the increment is always a rect of height = dY
    }

    SkFixed alphaR2 = alpha1 + dY * (len - 3);                        // the alpha at R - 2
    SkFixed lastAlpha = rowHeight - partialTriangleToAlpha(last, dY); // the alpha at R - 1
    deltas->addDelta(R - 1, y, (lastAlpha - alphaR2) * sign);
    deltas->addDelta(R, y, (rowHeight - lastAlpha) * sign);
}
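
// Worked example (not code that runs here) for the len == 1 case above: a +1-winding edge
// crossing a full-height row from x = 0.25 at the top to x = 0.75 at the bottom gives
// l = 0.25, r = 0.75, L = 0, R = 1, and trapezoidToAlpha(0.75, 0.25) = 0.5. We therefore add
// +0.5 at column 0 and +0.5 at column 1 (all in SkFixed), and the later running sum reports
// pixel 0 as half covered and pixel 1 onward as fully covered, matching the geometry.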

class XLessThan {
public:
    bool operator()(const SkBezier* a, const SkBezier* b) {
        return a->fP0.fX + a->fP1.fX < b->fP0.fX + b->fP1.fX;
    }
};

class YLessThan {
public:
    bool operator()(const SkBezier* a, const SkBezier* b) {
        return a->fP0.fY + a->fP1.fY < b->fP0.fY + b->fP1.fY;
    }
};

template<class Deltas> static SK_ALWAYS_INLINE
void gen_alpha_deltas(const SkPath& path, const SkIRect& clipBounds, Deltas& result,
                      SkBlitter* blitter, bool skipRect, bool pathContainedInClip) {
    // 1. Build edges
    SkEdgeBuilder builder;
    SkIRect ir = path.getBounds().roundOut();
    int count = builder.build_edges(path, &clipBounds, 0, pathContainedInClip,
                                    SkEdgeBuilder::kBezier);
    if (count == 0) {
        return;
    }
    SkBezier** list = builder.bezierList();

    // 2. Try to find the rect part because blitAntiRect is so much faster than blitCoverageDeltas
    int rectTop = ir.fBottom; // the rect is initialized to be empty as top = bot
    int rectBot = ir.fBottom;
    if (skipRect) { // only look for that rect if skipRect == true
        YLessThan lessThan; // sort edges in YX order
        SkTQSort(list, list + count - 1, lessThan);
        for (int i = 0; i < count - 1; ++i) {
            SkBezier* lb = list[i];
            SkBezier* rb = list[i + 1];

            // fCount == 2 ensures that lb and rb are lines instead of quads or cubics.
            bool lDX0 = lb->fP0.fX == lb->fP1.fX && lb->fCount == 2;
            bool rDX0 = rb->fP0.fX == rb->fP1.fX && rb->fCount == 2;
            if (!lDX0 || !rDX0) { // make sure that the edges are vertical
                continue;
            }

            SkAnalyticEdge l, r;
            l.setLine(lb->fP0, lb->fP1);
            r.setLine(rb->fP0, rb->fP1);

            SkFixed xorUpperY = l.fUpperY ^ r.fUpperY;
            SkFixed xorLowerY = l.fLowerY ^ r.fLowerY;
            if ((xorUpperY | xorLowerY) == 0) { // equal upperY and lowerY
                rectTop = SkFixedCeilToInt(l.fUpperY);
                rectBot = SkFixedFloorToInt(l.fLowerY);
                if (rectBot > rectTop) { // if bot == top, the rect is too short for blitAntiRect
                    int L = SkFixedCeilToInt(l.fUpperX);
                    int R = SkFixedFloorToInt(r.fUpperX);
                    if (L <= R) {
                        SkAlpha la = (SkIntToFixed(L) - l.fUpperX) >> 8;
                        SkAlpha ra = (r.fUpperX - SkIntToFixed(R)) >> 8;
                        result.setAntiRect(L - 1, rectTop, R - L, rectBot - rectTop, la, ra);
                    } else { // too thin to use blitAntiRect; reset the rect region to be empty
                        rectTop = rectBot = ir.fBottom;
                    }
                }
                break;
            }
        }
    }

    // 3. Sort edges in x so the deltas are more likely to be generated in x order; this only
    //    helps SkCoverageDeltaList. Don't sort more than SORT_THRESHOLD edges, where the
    //    log(count) factor of the quick sort may become a bottleneck; with that many edges,
    //    the deltas are unlikely to end up sorted anyway.
    constexpr int SORT_THRESHOLD = 256;
    if (std::is_same<Deltas, SkCoverageDeltaList>::value && count < SORT_THRESHOLD) {
        XLessThan lessThan;
        SkTQSort(list, list + count - 1, lessThan);
    }

    // Future todo: parallelize and SIMD the following code.
    // 4. Iterate through edges and generate deltas
    for (int index = 0; index < count; ++index) {
        SkAnalyticCubicEdge storage;
        SkASSERT(sizeof(SkAnalyticQuadraticEdge) >= sizeof(SkAnalyticEdge));
        SkASSERT(sizeof(SkAnalyticCubicEdge) >= sizeof(SkAnalyticQuadraticEdge));

        SkBezier* bezier = list[index];
        SkAnalyticEdge* currE = &storage;
        bool edgeSet = false;

        int originalWinding = 1;
        bool sortY = true;
        switch (bezier->fCount) {
            case 2: {
                edgeSet = currE->setLine(bezier->fP0, bezier->fP1);
                originalWinding = currE->fWinding;
                break;
            }
            case 3: {
                SkQuad* quad = static_cast<SkQuad*>(bezier);
                SkPoint pts[3] = {quad->fP0, quad->fP1, quad->fP2};
                edgeSet = static_cast<SkAnalyticQuadraticEdge*>(currE)->setQuadratic(pts);
                originalWinding = static_cast<SkAnalyticQuadraticEdge*>(currE)->fQEdge.fWinding;
                break;
            }
            case 4: {
                sortY = false;
                SkCubic* cubic = static_cast<SkCubic*>(bezier);
                SkPoint pts[4] = {cubic->fP0, cubic->fP1, cubic->fP2, cubic->fP3};
                edgeSet = static_cast<SkAnalyticCubicEdge*>(currE)->setCubic(pts, sortY);
                originalWinding = static_cast<SkAnalyticCubicEdge*>(currE)->fCEdge.fWinding;
                break;
            }
        }

        if (!edgeSet) {
            continue;
        }

        do {
            currE->fX = currE->fUpperX;

            SkFixed upperFloor = SkFixedFloorToFixed(currE->fUpperY);
            SkFixed lowerCeil = SkFixedCeilToFixed(currE->fLowerY);
            int iy = SkFixedFloorToInt(upperFloor);

            if (lowerCeil <= upperFloor + SK_Fixed1) { // only one row is affected by currE
                SkFixed rowHeight = currE->fLowerY - currE->fUpperY;
                SkFixed nextX = currE->fX + SkFixedMul(currE->fDX, rowHeight);
                if (iy >= clipBounds.fTop && iy < clipBounds.fBottom) {
                    add_coverage_delta_segment<true>(iy, rowHeight, currE, nextX, &result);
                }
                continue;
            }

            // check the first row
            SkFixed rowHeight = upperFloor + SK_Fixed1 - currE->fUpperY;
            SkFixed nextX;
            if (rowHeight != SK_Fixed1) { // it's a partial row
                nextX = currE->fX + SkFixedMul(currE->fDX, rowHeight);
                add_coverage_delta_segment<true>(iy, rowHeight, currE, nextX, &result);
            } else { // it's a full row so we can leave it to the while loop
                iy--; // compensate for the iy++ in the while loop
                nextX = currE->fX;
            }

            while (true) { // process the full rows in the middle
                iy++;
                SkFixed y = SkIntToFixed(iy);
                currE->fX = nextX;
                nextX += currE->fDX;

                if (y + SK_Fixed1 > currE->fLowerY) {
                    break; // no full rows left, break
                }

                // Check whether we're in the rect part that will be covered by blitAntiRect
                if (iy >= rectTop && iy < rectBot) {
                    SkASSERT(currE->fDX == 0); // If yes, we must be on an edge with fDX == 0.
                    iy = rectBot - 1; // Skip the rect part by advancing iy to the bottom.
                    continue;
                }

                // Add the current edge's coverage deltas on this full row
                add_coverage_delta_segment<false>(iy, SK_Fixed1, currE, nextX, &result);
            }

            // last partial row
            if (SkIntToFixed(iy) < currE->fLowerY &&
                    iy >= clipBounds.fTop && iy < clipBounds.fBottom) {
                rowHeight = currE->fLowerY - SkIntToFixed(iy);
                nextX = currE->fX + SkFixedMul(currE->fDX, rowHeight);
                add_coverage_delta_segment<true>(iy, rowHeight, currE, nextX, &result);
            }
        // The assignment to fWinding is intentional: it restores the winding that may have been
        // negated during updateLine.
        } while ((currE->fWinding = originalWinding) && currE->update(currE->fLowerY, sortY));
    }
}

// For the threaded backend with out-of-order init-once, we probably have to take care of
// blitRegion, sk_blit_above, and sk_blit_below in SkScan::AntiFillPath to maintain the draw
// order. If we do that, be careful that blitRect may throw an exception if the rect is empty.
void SkScan::DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
                         const SkIRect& clipBounds, bool containedInClip, bool forceRLE) {
    bool isEvenOdd = path.getFillType() & 1;
    bool isConvex = path.isConvex();
    bool isInverse = path.isInverseFillType();
    bool skipRect = isConvex && !isInverse;

    SkIRect clippedIR = ir;
    clippedIR.intersect(clipBounds);

    // The overhead of even constructing an SkCoverageDeltaList/Mask is too big,
    // so try TryBlitFatAntiRect first and return if it succeeds.
    if (!isInverse && TryBlitFatAntiRect(blitter, path, clipBounds)) {
        return;
    }

#ifdef GOOGLE3
    constexpr int STACK_SIZE = 12 << 10; // 12K stack allocation; Google3 has a 16K limit.
#else
    constexpr int STACK_SIZE = 64 << 10; // 64K stack allocation to avoid heap allocation
#endif
    SkSTArenaAlloc<STACK_SIZE> alloc; // avoid heap allocation with SkSTArenaAlloc

    // Only the blitter->blitXXX calls need to be done in order in the threaded backend;
    // everything before them can be done out of order.
    if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) {
        SkCoverageDeltaMask deltaMask(&alloc, clippedIR);
        gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip);
        deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex);
        blitter->blitMask(deltaMask.prepareSkMask(), clippedIR);
    } else {
        SkCoverageDeltaList deltaList(&alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE);
        gen_alpha_deltas(path, clipBounds, deltaList, blitter, skipRect, containedInClip);
        blitter->blitCoverageDeltas(&deltaList, clipBounds, isEvenOdd, isInverse, isConvex);
    }
}