blob: c999ce08c99cfc05804dee1af5bec9846d301293 [file] [log] [blame]
mike@reedtribe.orge51755f2011-12-10 19:36:56 +00001#include "SkArithmeticMode.h"
2#include "SkColorPriv.h"
3#include "SkUnPreMultiply.h"
4
// Xfermode implementing the arithmetic blend equation
//     result = k1 * src * dst + k2 * src + k3 * dst + k4
// with the coefficients stored as SkScalars; each component of the result
// is pinned to [0, 255].
class SkArithmeticMode_scalar : public SkXfermode {
public:
    // k1..k4 are stored as given; xfer32() rescales them (k1 /= 255,
    // k4 *= 255) so the equation can run on raw byte components.
    SkArithmeticMode_scalar(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4) {
        fK[0] = k1;
        fK[1] = k2;
        fK[2] = k3;
        fK[3] = k4;
    }

    // Per-span blend entry point; defined out-of-line below.
    virtual void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
                        const SkAlpha aa[]) SK_OVERRIDE;

    SK_DECLARE_UNFLATTENABLE_OBJECT()

private:
    SkScalar fK[4];  // k1, k2, k3, k4
};
22
mike@reedtribe.orge51755f2011-12-10 19:36:56 +000023static int pinToByte(int value) {
24 if (value < 0) {
25 value = 0;
26 } else if (value > 255) {
27 value = 255;
28 }
29 return value;
30}
31
32static int arith(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
33 int src, int dst) {
34 SkScalar result = SkScalarMul(k1, src * dst) +
35 SkScalarMul(k2, src) +
36 SkScalarMul(k3, dst) +
37 k4;
38 int res = SkScalarRoundToInt(result);
39 return pinToByte(res);
40}
41
// Linear interpolation from dst toward src by scale/256 (scale in [0, 256]).
static int blend(int src, int dst, int scale) {
    const int step = (src - dst) * scale >> 8;
    return dst + step;
}
45
// A color needs unpremultiplying only when its alpha is partial: fully
// transparent (0) and fully opaque (0xFF) pixels can be used as-is.
static bool needsUnpremul(int alpha) {
    return alpha != 0 && alpha != 0xFF;
}
49
50void SkArithmeticMode_scalar::xfer32(SkPMColor dst[], const SkPMColor src[],
51 int count, const SkAlpha aaCoverage[]) {
52 SkScalar k1 = fK[0] / 255;
53 SkScalar k2 = fK[1];
54 SkScalar k3 = fK[2];
55 SkScalar k4 = fK[3] * 255;
56
57 for (int i = 0; i < count; ++i) {
58 if ((NULL == aaCoverage) || aaCoverage[i]) {
59 SkPMColor sc = src[i];
60 SkPMColor dc = dst[i];
61 int sa = SkGetPackedA32(sc);
62 int da = SkGetPackedA32(dc);
63
64 int srcNeedsUnpremul = needsUnpremul(sa);
65 int dstNeedsUnpremul = needsUnpremul(sa);
66
67 int a, r, g, b;
68
tomhudson@google.com5efe0cb2012-04-10 19:14:48 +000069 if (!srcNeedsUnpremul && !dstNeedsUnpremul) {
mike@reedtribe.orge51755f2011-12-10 19:36:56 +000070 a = arith(k1, k2, k3, k4, sa, sa);
71 r = arith(k1, k2, k3, k4, SkGetPackedR32(sc), SkGetPackedR32(dc));
72 g = arith(k1, k2, k3, k4, SkGetPackedG32(sc), SkGetPackedG32(dc));
73 b = arith(k1, k2, k3, k4, SkGetPackedB32(sc), SkGetPackedB32(dc));
74 } else {
75 int sr = SkGetPackedR32(sc);
76 int sg = SkGetPackedG32(sc);
77 int sb = SkGetPackedB32(sc);
78 if (srcNeedsUnpremul) {
79 SkUnPreMultiply::Scale scale = SkUnPreMultiply::GetScale(sa);
80 sr = SkUnPreMultiply::ApplyScale(scale, sr);
81 sg = SkUnPreMultiply::ApplyScale(scale, sg);
82 sb = SkUnPreMultiply::ApplyScale(scale, sb);
83 }
84
85 int dr = SkGetPackedR32(dc);
86 int dg = SkGetPackedG32(dc);
87 int db = SkGetPackedB32(dc);
88 if (dstNeedsUnpremul) {
89 SkUnPreMultiply::Scale scale = SkUnPreMultiply::GetScale(da);
90 dr = SkUnPreMultiply::ApplyScale(scale, dr);
91 dg = SkUnPreMultiply::ApplyScale(scale, dg);
92 db = SkUnPreMultiply::ApplyScale(scale, db);
93 }
94
95 a = arith(k1, k2, k3, k4, sa, sa);
96 r = arith(k1, k2, k3, k4, sr, dr);
97 g = arith(k1, k2, k3, k4, sg, dg);
98 b = arith(k1, k2, k3, k4, sb, db);
99 }
100
101 // apply antialias coverage if necessary
102 if (aaCoverage && 0xFF != aaCoverage[i]) {
103 int scale = aaCoverage[i] + (aaCoverage[i] >> 7);
104 a = blend(a, SkGetPackedA32(sc), scale);
105 r = blend(r, SkGetPackedR32(sc), scale);
106 g = blend(g, SkGetPackedG32(sc), scale);
107 b = blend(b, SkGetPackedB32(sc), scale);
108 }
109
110 // turn the result back into premul
111 if (0xFF != a) {
112 int scale = a + (a >> 7);
113 r = SkAlphaMul(r, scale);
114 g = SkAlphaMul(g, scale);
115 b = SkAlphaMul(b, scale);
116 }
117 dst[i] = SkPackARGB32(a, r, g, b);
118 }
119 }
120}
121
122
123///////////////////////////////////////////////////////////////////////////////
124
// Return true if |x|, treated as a dot-8 fixed-point magnitude, fits in a
// signed value of the given bit width.
static bool fitsInBits(SkScalar x, int bits) {
#ifdef SK_SCALAR_IS_FIXED
    x = SkAbs32(x);
    x += 1 << 7;    // round at the dot-8 boundary of the 16.16 value
    x >>= 8;        // now a dot-8 value
    return x < (1 << (bits - 1));
#else
    return SkScalarAbs(x) < (1 << (bits - 1));
#endif
}
135
#if 0 // UNUSED
// Convert a scalar to 24.8 fixed point ("dot-8"), rounding in the
// fixed-point build. Kept for the compiled-out integer fast paths below.
static int32_t toDot8(SkScalar x) {
#ifdef SK_SCALAR_IS_FIXED
    x += 1 << 7;    // round
    x >>= 8;        // 16.16 -> 24.8
    return x;
#else
    return (int32_t)(x * 256);
#endif
}
#endif
mike@reedtribe.orge51755f2011-12-10 19:36:56 +0000147
148SkXfermode* SkArithmeticMode::Create(SkScalar k1, SkScalar k2,
149 SkScalar k3, SkScalar k4) {
150 if (fitsInBits(k1, 8) && fitsInBits(k2, 16) &&
151 fitsInBits(k2, 16) && fitsInBits(k2, 24)) {
152
caryclark@google.com383d5d42012-06-06 12:09:18 +0000153#if 0 // UNUSED
mike@reedtribe.orge51755f2011-12-10 19:36:56 +0000154 int32_t i1 = toDot8(k1);
155 int32_t i2 = toDot8(k2);
156 int32_t i3 = toDot8(k3);
157 int32_t i4 = toDot8(k4);
mike@reedtribe.orge51755f2011-12-10 19:36:56 +0000158 if (i1) {
159 return SkNEW_ARGS(SkArithmeticMode_quad, (i1, i2, i3, i4));
160 }
161 if (0 == i2) {
162 return SkNEW_ARGS(SkArithmeticMode_dst, (i3, i4));
163 }
164 if (0 == i3) {
165 return SkNEW_ARGS(SkArithmeticMode_src, (i2, i4));
166 }
167 return SkNEW_ARGS(SkArithmeticMode_linear, (i2, i3, i4));
168#endif
169 }
170 return SkNEW_ARGS(SkArithmeticMode_scalar, (k1, k2, k3, k4));
171}
172