/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRasterPipeline_opts_DEFINED
#define SkRasterPipeline_opts_DEFINED

#include "SkColorPriv.h"
#include "SkColorLookUpTable.h"
#include "SkColorSpaceXform_A2B.h"
#include "SkColorSpaceXformPriv.h"
#include "SkHalf.h"
#include "SkImageShaderContext.h"
#include "SkMSAN.h"
#include "SkPM4f.h"
#include "SkPM4fPriv.h"
#include "SkRasterPipeline.h"
#include "SkSRGB.h"

namespace {

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    static constexpr int N = 8;
#else
    static constexpr int N = 4;
#endif

    using SkNf = SkNx<N, float>;
    using SkNi = SkNx<N, int32_t>;
    using SkNu = SkNx<N, uint32_t>;
    using SkNh = SkNx<N, uint16_t>;
    using SkNb = SkNx<N, uint8_t>;

    using Fn = void(SK_VECTORCALL *)(size_t x_tail, void** p, SkNf,SkNf,SkNf,SkNf,
                                                              SkNf,SkNf,SkNf,SkNf);
    // x_tail encodes two values x and tail as x*N+tail, where 0 <= tail < N.
    // x is the induction variable we're walking along, incrementing by N each step.
    // tail == 0 means work with a full N pixels; otherwise use only the low tail pixels.
    //
    // p is our program, a sequence of Fn to call interleaved with any void* context pointers. E.g.
    //    &load_8888
    //    (src ptr)
    //    &from_srgb
    //    &move_src_dst
    //    &load_f16
    //    (dst ptr)
    //    &swap
    //    &srcover
    //    &store_f16
    //    (dst ptr)
    //    &just_return
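    //
    // For example (a worked decode of the encoding): with N == 4, x_tail == 32 unpacks to
    // x == 8, tail == 0, i.e. a full 4 pixels starting at pixel 8, while x_tail == 35
    // unpacks to x == 8, tail == 3, i.e. just the 3 pixels 8, 9, and 10.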

}  // namespace

#define SI static inline

// Basically, return *(*ptr)++, maybe faster than the compiler can do it.
SI void* load_and_increment(void*** ptr) {
    // We do this often enough that it's worth hyper-optimizing.
    // x86 can do this in one instruction if ptr is in rsi.
    // (This is why p is the second argument to Fn: it's passed in rsi.)
#if defined(__GNUC__) && defined(__x86_64__)
    void* rax;
    __asm__("lodsq" : "=a"(rax), "+S"(*ptr));
    return rax;
#else
    return *(*ptr)++;
#endif
}
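// (For the curious: lodsq loads the 8 bytes at [rsi] into rax and advances rsi by 8,
// which is exactly the *(*ptr)++ in the portable branch.)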

// Stages are logically a pipeline, and physically are contiguous in an array.
// To get to the next stage, we just increment our pointer to the next array element.
SI void SK_VECTORCALL next(size_t x_tail, void** p, SkNf r, SkNf g, SkNf b, SkNf a,
                                                    SkNf dr, SkNf dg, SkNf db, SkNf da) {
    auto next = (Fn)load_and_increment(&p);
    next(x_tail,p, r,g,b,a, dr,dg,db,da);
}

// Stages defined below always call next.
// This is always the last stage, a backstop that actually returns to the caller when done.
SI void SK_VECTORCALL just_return(size_t, void**, SkNf, SkNf, SkNf, SkNf,
                                                  SkNf, SkNf, SkNf, SkNf) {}

#define STAGE(name)                                                                      \
    static SK_ALWAYS_INLINE void name##_kernel(size_t x, size_t tail,                    \
                                               SkNf&  r, SkNf&  g, SkNf&  b, SkNf&  a,   \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da);  \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                                  \
                               SkNf r, SkNf g, SkNf b, SkNf a,                           \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                     \
        name##_kernel(x_tail/N, x_tail%N, r,g,b,a, dr,dg,db,da);                         \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                            \
    }                                                                                    \
    static SK_ALWAYS_INLINE void name##_kernel(size_t x, size_t tail,                    \
                                               SkNf&  r, SkNf&  g, SkNf&  b, SkNf&  a,   \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)

#define STAGE_CTX(name, Ctx)                                                             \
    static SK_ALWAYS_INLINE void name##_kernel(Ctx ctx, size_t x, size_t tail,           \
                                               SkNf&  r, SkNf&  g, SkNf&  b, SkNf&  a,   \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da);  \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                                  \
                               SkNf r, SkNf g, SkNf b, SkNf a,                           \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                     \
        auto ctx = (Ctx)load_and_increment(&p);                                          \
        name##_kernel(ctx, x_tail/N, x_tail%N, r,g,b,a, dr,dg,db,da);                    \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                            \
    }                                                                                    \
    static SK_ALWAYS_INLINE void name##_kernel(Ctx ctx, size_t x, size_t tail,           \
                                               SkNf&  r, SkNf&  g, SkNf&  b, SkNf&  a,   \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
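
// To illustrate (a sketch, not generated code): STAGE_CTX(set_rgb, const float*) declares a
// kernel taking (ctx, x, tail) plus the eight register arguments, and defines set_rgb() itself,
// which pops its context pointer off the program with load_and_increment(), unpacks x_tail,
// runs the kernel, then calls next() to keep the pipeline going.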

// Many xfermodes apply the same logic to each channel.
#define RGBA_XFERMODE(name)                                                              \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,            \
                                               const SkNf& d, const SkNf& da);           \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                                  \
                               SkNf r, SkNf g, SkNf b, SkNf a,                           \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                     \
        r = name##_kernel(r,a,dr,da);                                                    \
        g = name##_kernel(g,a,dg,da);                                                    \
        b = name##_kernel(b,a,db,da);                                                    \
        a = name##_kernel(a,a,da,da);                                                    \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                            \
    }                                                                                    \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,            \
                                               const SkNf& d, const SkNf& da)

// Most of the rest apply the same logic to color channels and use srcover's alpha logic.
#define RGB_XFERMODE(name)                                                               \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,            \
                                               const SkNf& d, const SkNf& da);           \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                                  \
                               SkNf r, SkNf g, SkNf b, SkNf a,                           \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                     \
        r = name##_kernel(r,a,dr,da);                                                    \
        g = name##_kernel(g,a,dg,da);                                                    \
        b = name##_kernel(b,a,db,da);                                                    \
        a = a + (da * (1.0f-a));                                                         \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                            \
    }                                                                                    \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,            \
                                               const SkNf& d, const SkNf& da)

template <typename T>
SI SkNx<N,T> load(size_t tail, const T* src) {
    if (tail) {
        T buf[8] = {0};
        switch (tail & (N-1)) {
            case 7: buf[6] = src[6];
            case 6: buf[5] = src[5];
            case 5: buf[4] = src[4];
            case 4: buf[3] = src[3];
            case 3: buf[2] = src[2];
            case 2: buf[1] = src[1];
        }
        buf[0] = src[0];
        return SkNx<N,T>::Load(buf);
    }
    return SkNx<N,T>::Load(src);
}
template <typename T>
SI SkNx<N,T> gather(size_t tail, const T* src, const SkNi& offset) {
    if (tail) {
        T buf[8] = {0};
        switch (tail & (N-1)) {
            case 7: buf[6] = src[offset[6]];
            case 6: buf[5] = src[offset[5]];
            case 5: buf[4] = src[offset[4]];
            case 4: buf[3] = src[offset[3]];
            case 3: buf[2] = src[offset[2]];
            case 2: buf[1] = src[offset[1]];
        }
        buf[0] = src[offset[0]];
        return SkNx<N,T>::Load(buf);
    }
    T buf[8];
    for (size_t i = 0; i < N; i++) {
        buf[i] = src[offset[i]];
    }
    return SkNx<N,T>::Load(buf);
}
template <typename T>
SI void store(size_t tail, const SkNx<N,T>& v, T* dst) {
    if (tail) {
        switch (tail & (N-1)) {
            case 7: dst[6] = v[6];
            case 6: dst[5] = v[5];
            case 5: dst[4] = v[4];
            case 4: dst[3] = v[3];
            case 3: dst[2] = v[2];
            case 2: dst[1] = v[1];
        }
        dst[0] = v[0];
        return;
    }
    v.store(dst);
}
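// A note on the tail convention above (illustrative): load(0, src) reads a full N-wide
// vector, while e.g. load(3, src) reads only src[0..2] and zero-fills the remaining lanes;
// the switch cases deliberately fall through to peel off one high lane at a time.
// store() mirrors this, writing only the low tail lanes so we never touch memory past
// the end of a row.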

#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    SI __m256i mask(size_t tail) {
        static const int masks[][8] = {
            {~0,~0,~0,~0, ~0,~0,~0,~0},  // remember, tail == 0 ~~> load all N
            {~0, 0, 0, 0,  0, 0, 0, 0},
            {~0,~0, 0, 0,  0, 0, 0, 0},
            {~0,~0,~0, 0,  0, 0, 0, 0},
            {~0,~0,~0,~0,  0, 0, 0, 0},
            {~0,~0,~0,~0, ~0, 0, 0, 0},
            {~0,~0,~0,~0, ~0,~0, 0, 0},
            {~0,~0,~0,~0, ~0,~0,~0, 0},
        };
        return SkNi::Load(masks + tail).fVec;
    }

    SI SkNi load(size_t tail, const int32_t* src) {
        return tail ? _mm256_maskload_epi32((const int*)src, mask(tail))
                    : SkNi::Load(src);
    }
    SI SkNu load(size_t tail, const uint32_t* src) {
        return tail ? _mm256_maskload_epi32((const int*)src, mask(tail))
                    : SkNu::Load(src);
    }
    SI SkNf load(size_t tail, const float* src) {
        return tail ? _mm256_maskload_ps((const float*)src, mask(tail))
                    : SkNf::Load(src);
    }
    SI SkNi gather(size_t tail, const int32_t* src, const SkNi& offset) {
        auto m = mask(tail);
        return _mm256_mask_i32gather_epi32(SkNi(0).fVec, (const int*)src, offset.fVec, m, 4);
    }
    SI SkNu gather(size_t tail, const uint32_t* src, const SkNi& offset) {
        auto m = mask(tail);
        return _mm256_mask_i32gather_epi32(SkNi(0).fVec, (const int*)src, offset.fVec, m, 4);
    }
    SI SkNf gather(size_t tail, const float* src, const SkNi& offset) {
        auto m = _mm256_castsi256_ps(mask(tail));
        return _mm256_mask_i32gather_ps(SkNf(0).fVec, (const float*)src, offset.fVec, m, 4);
    }

    static const char* bug = "I don't think MSAN understands maskstore.";

    SI void store(size_t tail, const SkNi& v, int32_t* dst) {
        if (tail) {
            _mm256_maskstore_epi32((int*)dst, mask(tail), v.fVec);
            return sk_msan_mark_initialized(dst, dst+tail, bug);
        }
        v.store(dst);
    }
    SI void store(size_t tail, const SkNu& v, uint32_t* dst) {
        if (tail) {
            _mm256_maskstore_epi32((int*)dst, mask(tail), v.fVec);
            return sk_msan_mark_initialized(dst, dst+tail, bug);
        }
        v.store(dst);
    }
    SI void store(size_t tail, const SkNf& v, float* dst) {
        if (tail) {
            _mm256_maskstore_ps((float*)dst, mask(tail), v.fVec);
            return sk_msan_mark_initialized(dst, dst+tail, bug);
        }
        v.store(dst);
    }
#endif

SI SkNf SkNf_fma(const SkNf& f, const SkNf& m, const SkNf& a) { return SkNx_fma(f,m,a); }

SI SkNi SkNf_round(const SkNf& x, const SkNf& scale) {
    // Every time I try, _mm_cvtps_epi32 benches as slower than using FMA and _mm_cvttps_epi32.  :/
    return SkNx_cast<int>(SkNf_fma(x,scale, 0.5f));
}

SI SkNf SkNf_from_byte(const SkNi& x) {
    // Same trick as in store_8888: 0x470000BB == 32768.0f + BB/256.0f for all bytes BB.
    auto v = 0x47000000 | x;
    // Read this as (pun_float(v) - 32768.0f) * (256/255.0f), redistributed to be an FMA.
    return SkNf_fma(SkNf::Load(&v), 256/255.0f, -32768*256/255.0f);
}
SI SkNf SkNf_from_byte(const SkNu& x) { return SkNf_from_byte(SkNi::Load(&x)); }
SI SkNf SkNf_from_byte(const SkNb& x) { return SkNf_from_byte(SkNx_cast<int>(x)); }
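// Why the bit trick above works, as a worked example: 0x47000000 is the IEEE encoding of
// 32768.0f, and OR-ing a byte BB into the low mantissa bits gives exactly 32768.0f + BB/256.0f.
// So pun_float(v) - 32768.0f == BB/256.0f, and scaling by 256/255.0f maps 0x00 -> 0.0f and
// 0xFF -> 1.0f.  store_8888() below runs the same trick in reverse.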

SI void from_8888(const SkNu& _8888, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
    *r = SkNf_from_byte((_8888      ) & 0xff);
    *g = SkNf_from_byte((_8888 >>  8) & 0xff);
    *b = SkNf_from_byte((_8888 >> 16) & 0xff);
    *a = SkNf_from_byte((_8888 >> 24)       );
}
SI void from_4444(const SkNh& _4444, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
    auto _32_bit = SkNx_cast<int>(_4444);

    *r = SkNx_cast<float>(_32_bit & (0xF << SK_R4444_SHIFT)) * (1.0f / (0xF << SK_R4444_SHIFT));
    *g = SkNx_cast<float>(_32_bit & (0xF << SK_G4444_SHIFT)) * (1.0f / (0xF << SK_G4444_SHIFT));
    *b = SkNx_cast<float>(_32_bit & (0xF << SK_B4444_SHIFT)) * (1.0f / (0xF << SK_B4444_SHIFT));
    *a = SkNx_cast<float>(_32_bit & (0xF << SK_A4444_SHIFT)) * (1.0f / (0xF << SK_A4444_SHIFT));
}
SI void from_565(const SkNh& _565, SkNf* r, SkNf* g, SkNf* b) {
    auto _32_bit = SkNx_cast<int>(_565);

    *r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
    *g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
    *b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
}
SI void from_f16(const void* px, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
    SkNh rh, gh, bh, ah;
    SkNh::Load4(px, &rh, &gh, &bh, &ah);

    *r = SkHalfToFloat_finite_ftz(rh);
    *g = SkHalfToFloat_finite_ftz(gh);
    *b = SkHalfToFloat_finite_ftz(bh);
    *a = SkHalfToFloat_finite_ftz(ah);
}

STAGE_CTX(trace, const char*) {
    SkDebugf("%s\n", ctx);
}
STAGE(registers) {
    auto print = [](const char* name, const SkNf& v) {
        SkDebugf("%s:", name);
        for (int i = 0; i < N; i++) {
            SkDebugf(" %g", v[i]);
        }
        SkDebugf("\n");
    };
    print(" r",  r);
    print(" g",  g);
    print(" b",  b);
    print(" a",  a);
    print("dr", dr);
    print("dg", dg);
    print("db", db);
    print("da", da);
}

STAGE(clamp_0) {
    a = SkNf::Max(a, 0.0f);
    r = SkNf::Max(r, 0.0f);
    g = SkNf::Max(g, 0.0f);
    b = SkNf::Max(b, 0.0f);
}
STAGE(clamp_1) {
    a = SkNf::Min(a, 1.0f);
    r = SkNf::Min(r, 1.0f);
    g = SkNf::Min(g, 1.0f);
    b = SkNf::Min(b, 1.0f);
}
STAGE(clamp_a) {
    a = SkNf::Min(a, 1.0f);
    r = SkNf::Min(r, a);
    g = SkNf::Min(g, a);
    b = SkNf::Min(b, a);
}

STAGE(unpremul) {
    auto scale = (a == 0.0f).thenElse(0.0f, 1.0f/a);
    r *= scale;
    g *= scale;
    b *= scale;
}
STAGE(premul) {
    r *= a;
    g *= a;
    b *= a;
}

STAGE_CTX(set_rgb, const float*) {
    r = ctx[0];
    g = ctx[1];
    b = ctx[2];
}
STAGE(swap_rb) { SkTSwap(r,b); }

STAGE(move_src_dst) {
    dr = r;
    dg = g;
    db = b;
    da = a;
}
STAGE(move_dst_src) {
    r = dr;
    g = dg;
    b = db;
    a = da;
}
STAGE(swap) {
    SkTSwap(r,dr);
    SkTSwap(g,dg);
    SkTSwap(b,db);
    SkTSwap(a,da);
}

STAGE(from_srgb) {
    r = sk_linear_from_srgb_math(r);
    g = sk_linear_from_srgb_math(g);
    b = sk_linear_from_srgb_math(b);
}
STAGE(to_srgb) {
    r = sk_linear_to_srgb_needs_round(r);
    g = sk_linear_to_srgb_needs_round(g);
    b = sk_linear_to_srgb_needs_round(b);
}

STAGE(from_2dot2) {
    auto from_2dot2 = [](const SkNf& x) {
        // x^(141/64) = x^(2.20312) is a great approximation of the true value, x^(2.2).
        // (note: x^(35/16) = x^(2.1875) is an okay one as well and would be quicker)
        auto x16 = x.rsqrt().rsqrt().rsqrt().rsqrt();   // x^(1/16) = x^(4/64)
        auto x64 = x16.rsqrt().rsqrt();                 // x^(1/64)

        // x^(141/64) = x^(128/64) * x^(12/64) * x^(1/64)
        return SkNf::Max((x*x) * (x16*x16*x16) * (x64), 0.0f);
    };

    r = from_2dot2(r);
    g = from_2dot2(g);
    b = from_2dot2(b);
}
STAGE(to_2dot2) {
    auto to_2dot2 = [](const SkNf& x) {
        // x^(29/64) is a very good approximation of the true value, x^(1/2.2).
        auto x2  = x.rsqrt(),                            // x^(-1/2)
             x32 = x2.rsqrt().rsqrt().rsqrt().rsqrt(),   // x^(-1/32)
             x64 = x32.rsqrt();                          // x^(+1/64)

        // 29 = 32 - 2 - 1
        return SkNf::Max(x2.invert() * x32 * x64.invert(), 0.0f);  // Watch out for NaN.
    };

    r = to_2dot2(r);
    g = to_2dot2(g);
    b = to_2dot2(b);
}
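// A quick check on the rsqrt() exponent arithmetic above (each rsqrt() halves the exponent
// and flips its sign): in to_2dot2, x2.invert() is x^(32/64), x32 is x^(-2/64), and
// x64.invert() is x^(-1/64), so the product is x^((32-2-1)/64) = x^(29/64) ~~ x^(1/2.2).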

// The default shader produces a constant color (from the SkPaint).
STAGE_CTX(constant_color, const SkPM4f*) {
    r = ctx->r();
    g = ctx->g();
    b = ctx->b();
    a = ctx->a();
}

// Set up registers with values relevant to shaders.
STAGE_CTX(seed_shader, const int*) {
    int y = *ctx;

    static const float dx[] = { 0,1,2,3,4,5,6,7 };
    r = x + 0.5f + SkNf::Load(dx);  // dst pixel center x coordinates
    g = y + 0.5f;                   // dst pixel center y coordinate(s)
    b = 1.0f;
    a = 0.0f;
    dr = dg = db = da = 0.0f;
}

// s' = sc for a scalar c.
STAGE_CTX(scale_1_float, const float*) {
    SkNf c = *ctx;

    r *= c;
    g *= c;
    b *= c;
    a *= c;
}
// s' = sc for 8-bit c.
STAGE_CTX(scale_u8, const uint8_t**) {
    auto ptr = *ctx + x;
    SkNf c = SkNf_from_byte(load(tail, ptr));

    r = r*c;
    g = g*c;
    b = b*c;
    a = a*c;
}

SI SkNf lerp(const SkNf& from, const SkNf& to, const SkNf& cov) {
    return SkNf_fma(to-from, cov, from);
}

// s' = d(1-c) + sc, for a scalar c.
STAGE_CTX(lerp_1_float, const float*) {
    SkNf c = *ctx;

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}

// s' = d(1-c) + sc for 8-bit c.
STAGE_CTX(lerp_u8, const uint8_t**) {
    auto ptr = *ctx + x;
    SkNf c = SkNf_from_byte(load(tail, ptr));

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}

// s' = d(1-c) + sc for 565 c.
STAGE_CTX(lerp_565, const uint16_t**) {
    auto ptr = *ctx + x;
    SkNf cr, cg, cb;
    from_565(load(tail, ptr), &cr, &cg, &cb);

    r = lerp(dr, r, cr);
    g = lerp(dg, g, cg);
    b = lerp(db, b, cb);
    a = 1.0f;
}

STAGE_CTX(load_a8, const uint8_t**) {
    auto ptr = *ctx + x;
    r = g = b = 0.0f;
    a = SkNf_from_byte(load(tail, ptr));
}
STAGE_CTX(store_a8, uint8_t**) {
    auto ptr = *ctx + x;
    store(tail, SkNx_cast<uint8_t>(SkNf_round(255.0f, a)), ptr);
}

STAGE_CTX(load_565, const uint16_t**) {
    auto ptr = *ctx + x;
    from_565(load(tail, ptr), &r,&g,&b);
    a = 1.0f;
}
STAGE_CTX(store_565, uint16_t**) {
    auto ptr = *ctx + x;
    store(tail, SkNx_cast<uint16_t>( SkNf_round(r, SK_R16_MASK) << SK_R16_SHIFT
                                   | SkNf_round(g, SK_G16_MASK) << SK_G16_SHIFT
                                   | SkNf_round(b, SK_B16_MASK) << SK_B16_SHIFT), ptr);
}

STAGE_CTX(load_f16, const uint64_t**) {
    auto ptr = *ctx + x;

    const void* src = ptr;
    SkNx<N, uint64_t> px;
    if (tail) {
        px = load(tail, ptr);
        src = &px;
    }
    from_f16(src, &r, &g, &b, &a);
}
STAGE_CTX(store_f16, uint64_t**) {
    auto ptr = *ctx + x;

    SkNx<N, uint64_t> px;
    SkNh::Store4(tail ? (void*)&px : (void*)ptr, SkFloatToHalf_finite_ftz(r),
                                                 SkFloatToHalf_finite_ftz(g),
                                                 SkFloatToHalf_finite_ftz(b),
                                                 SkFloatToHalf_finite_ftz(a));
    if (tail) {
        store(tail, px, ptr);
    }
}

STAGE_CTX(store_f32, SkPM4f**) {
    auto ptr = *ctx + x;

    SkNx<N, SkPM4f> px;
    SkNf::Store4(tail ? (void*)&px : (void*)ptr, r,g,b,a);
    if (tail) {
        store(tail, px, ptr);
    }
}

STAGE_CTX(load_8888, const uint32_t**) {
    auto ptr = *ctx + x;
    from_8888(load(tail, ptr), &r, &g, &b, &a);
}
STAGE_CTX(store_8888, uint32_t**) {
    auto byte = [](const SkNf& x, int ix) {
        // Here's a neat trick: 0x47000000 == 32768.0f, and 0x470000ff == 32768.0f + (255/256.0f).
        auto v = SkNf_fma(255/256.0f, x, 32768.0f);
        switch (ix) {
            case 0: return SkNi::Load(&v) & 0xff;  // R
            case 3: return SkNi::Load(&v) << 24;   // A
        }
        return (SkNi::Load(&v) & 0xff) << (8*ix);  // B or G
    };

    auto ptr = *ctx + x;
    store(tail, byte(r,0)|byte(g,1)|byte(b,2)|byte(a,3), (int*)ptr);
}

STAGE_CTX(load_u16_be, const uint64_t**) {
    auto ptr = *ctx + x;
    const void* src = ptr;
    SkNx<N, uint64_t> px;
    if (tail) {
        px = load(tail, ptr);
        src = &px;
    }

    SkNh rh, gh, bh, ah;
    SkNh::Load4(src, &rh, &gh, &bh, &ah);
    r = (1.0f / 65535.0f) * SkNx_cast<float>((rh << 8) | (rh >> 8));
    g = (1.0f / 65535.0f) * SkNx_cast<float>((gh << 8) | (gh >> 8));
    b = (1.0f / 65535.0f) * SkNx_cast<float>((bh << 8) | (bh >> 8));
    a = (1.0f / 65535.0f) * SkNx_cast<float>((ah << 8) | (ah >> 8));
}

STAGE_CTX(load_rgb_u16_be, const uint16_t**) {
    auto ptr = *ctx + 3*x;
    const void* src = ptr;
    uint16_t buf[N*3] = {0};
    if (tail) {
        memcpy(buf, src, tail*3*sizeof(uint16_t));
        src = buf;
    }

    SkNh rh, gh, bh;
    SkNh::Load3(src, &rh, &gh, &bh);
    r = (1.0f / 65535.0f) * SkNx_cast<float>((rh << 8) | (rh >> 8));
    g = (1.0f / 65535.0f) * SkNx_cast<float>((gh << 8) | (gh >> 8));
    b = (1.0f / 65535.0f) * SkNx_cast<float>((bh << 8) | (bh >> 8));
    a = 1.0f;
}

STAGE_CTX(store_u16_be, uint64_t**) {
    auto to_u16_be = [](const SkNf& x) {
        SkNh x16 = SkNx_cast<uint16_t>(65535.0f * x);
        return (x16 << 8) | (x16 >> 8);
    };

    auto ptr = *ctx + x;
    SkNx<N, uint64_t> px;
    SkNh::Store4(tail ? (void*)&px : (void*)ptr, to_u16_be(r),
                                                 to_u16_be(g),
                                                 to_u16_be(b),
                                                 to_u16_be(a));
    if (tail) {
        store(tail, px, ptr);
    }
}

STAGE_CTX(load_tables, const LoadTablesContext*) {
    auto ptr = (const uint32_t*)ctx->fSrc + x;

    SkNu rgba = load(tail, ptr);
    auto to_int = [](const SkNu& v) { return SkNi::Load(&v); };
    r = gather(tail, ctx->fR, to_int((rgba >>  0) & 0xff));
    g = gather(tail, ctx->fG, to_int((rgba >>  8) & 0xff));
    b = gather(tail, ctx->fB, to_int((rgba >> 16) & 0xff));
    a = SkNf_from_byte(rgba >> 24);
}

STAGE_CTX(load_tables_u16_be, const LoadTablesContext*) {
    auto ptr = (const uint64_t*)ctx->fSrc + x;
    const void* src = ptr;
    SkNx<N, uint64_t> px;
    if (tail) {
        px = load(tail, ptr);
        src = &px;
    }

    SkNh rh, gh, bh, ah;
    SkNh::Load4(src, &rh, &gh, &bh, &ah);

    // ctx->fSrc is big-endian, so "& 0xff" grabs the 8 most significant bits of each component.
    r = gather(tail, ctx->fR, SkNx_cast<int>(rh & 0xff));
    g = gather(tail, ctx->fG, SkNx_cast<int>(gh & 0xff));
    b = gather(tail, ctx->fB, SkNx_cast<int>(bh & 0xff));
    a = (1.0f / 65535.0f) * SkNx_cast<float>((ah << 8) | (ah >> 8));
}

STAGE_CTX(load_tables_rgb_u16_be, const LoadTablesContext*) {
    auto ptr = (const uint16_t*)ctx->fSrc + 3*x;
    const void* src = ptr;
    uint16_t buf[N*3] = {0};
    if (tail) {
        memcpy(buf, src, tail*3*sizeof(uint16_t));
        src = buf;
    }

    SkNh rh, gh, bh;
    SkNh::Load3(src, &rh, &gh, &bh);

    // ctx->fSrc is big-endian, so "& 0xff" grabs the 8 most significant bits of each component.
    r = gather(tail, ctx->fR, SkNx_cast<int>(rh & 0xff));
    g = gather(tail, ctx->fG, SkNx_cast<int>(gh & 0xff));
    b = gather(tail, ctx->fB, SkNx_cast<int>(bh & 0xff));
    a = 1.0f;
}

STAGE_CTX(store_tables, const StoreTablesContext*) {
    auto ptr = ctx->fDst + x;

    float scale = ctx->fCount - 1;
    SkNi ri = SkNf_round(scale, r);
    SkNi gi = SkNf_round(scale, g);
    SkNi bi = SkNf_round(scale, b);

    store(tail, ( SkNx_cast<int>(gather(tail, ctx->fR, ri)) <<  0
                | SkNx_cast<int>(gather(tail, ctx->fG, gi)) <<  8
                | SkNx_cast<int>(gather(tail, ctx->fB, bi)) << 16
                | SkNf_round(255.0f, a)                     << 24), (int*)ptr);
}

SI SkNf inv(const SkNf& x) { return 1.0f - x; }

RGBA_XFERMODE(clear)    { return 0.0f; }
RGBA_XFERMODE(srcatop)  { return s*da + d*inv(sa); }
RGBA_XFERMODE(srcin)    { return s * da; }
RGBA_XFERMODE(srcout)   { return s * inv(da); }
RGBA_XFERMODE(srcover)  { return SkNf_fma(d, inv(sa), s); }
RGBA_XFERMODE(dstatop)  { return srcatop_kernel(d,da,s,sa); }
RGBA_XFERMODE(dstin)    { return srcin_kernel  (d,da,s,sa); }
RGBA_XFERMODE(dstout)   { return srcout_kernel (d,da,s,sa); }
RGBA_XFERMODE(dstover)  { return srcover_kernel(d,da,s,sa); }

RGBA_XFERMODE(modulate) { return s*d; }
RGBA_XFERMODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
RGBA_XFERMODE(plus_)    { return s + d; }
RGBA_XFERMODE(screen)   { return s + d - s*d; }
RGBA_XFERMODE(xor_)     { return s*inv(da) + d*inv(sa); }

RGB_XFERMODE(colorburn) {
    return (d == da  ).thenElse(d + s*inv(da),
           (s == 0.0f).thenElse(s + d*inv(sa),
                                sa*(da - SkNf::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
}
RGB_XFERMODE(colordodge) {
    return (d == 0.0f).thenElse(d + s*inv(da),
           (s == sa  ).thenElse(s + d*inv(sa),
                                sa*SkNf::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
}
RGB_XFERMODE(darken)     { return s + d - SkNf::Max(s*da, d*sa); }
RGB_XFERMODE(difference) { return s + d - 2.0f*SkNf::Min(s*da,d*sa); }
RGB_XFERMODE(exclusion)  { return s + d - 2.0f*s*d; }
RGB_XFERMODE(hardlight) {
    return s*inv(da) + d*inv(sa)
         + (2.0f*s <= sa).thenElse(2.0f*s*d, sa*da - 2.0f*(da-d)*(sa-s));
}
RGB_XFERMODE(lighten) { return s + d - SkNf::Min(s*da, d*sa); }
RGB_XFERMODE(overlay) { return hardlight_kernel(d,da,s,sa); }
RGB_XFERMODE(softlight) {
    SkNf m  = (da > 0.0f).thenElse(d / da, 0.0f),
         s2 = 2.0f*s,
         m4 = 4.0f*m;

    // The logic forks three ways:
    //    1. dark src?
    //    2. light src, dark dst?
    //    3. light src, light dst?
    SkNf darkSrc = d*(sa + (s2 - sa)*(1.0f - m)),     // Used in case 1.
         darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m,  // Used in case 2.
         liteDst = m.rsqrt().invert() - m,            // Used in case 3.
         liteSrc = d*sa + da*(s2 - sa) * (4.0f*d <= da).thenElse(darkDst, liteDst);  // 2 or 3?
    return s*inv(da) + d*inv(sa) + (s2 <= sa).thenElse(darkSrc, liteSrc);            // 1 or (2 or 3)?
}

STAGE(luminance_to_alpha) {
    a = SK_LUM_COEFF_R*r + SK_LUM_COEFF_G*g + SK_LUM_COEFF_B*b;
    r = g = b = 0;
}

STAGE_CTX(matrix_2x3, const float*) {
    auto m = ctx;

    auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[2], m[4])),
         G = SkNf_fma(r,m[1], SkNf_fma(g,m[3], m[5]));
    r = R;
    g = G;
}
STAGE_CTX(matrix_3x4, const float*) {
    auto m = ctx;

    auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[3], SkNf_fma(b,m[6], m[ 9]))),
         G = SkNf_fma(r,m[1], SkNf_fma(g,m[4], SkNf_fma(b,m[7], m[10]))),
         B = SkNf_fma(r,m[2], SkNf_fma(g,m[5], SkNf_fma(b,m[8], m[11])));
    r = R;
    g = G;
    b = B;
}
STAGE_CTX(matrix_4x5, const float*) {
    auto m = ctx;

    auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[4], SkNf_fma(b,m[ 8], SkNf_fma(a,m[12], m[16])))),
         G = SkNf_fma(r,m[1], SkNf_fma(g,m[5], SkNf_fma(b,m[ 9], SkNf_fma(a,m[13], m[17])))),
         B = SkNf_fma(r,m[2], SkNf_fma(g,m[6], SkNf_fma(b,m[10], SkNf_fma(a,m[14], m[18])))),
         A = SkNf_fma(r,m[3], SkNf_fma(g,m[7], SkNf_fma(b,m[11], SkNf_fma(a,m[15], m[19]))));
    r = R;
    g = G;
    b = B;
    a = A;
}
STAGE_CTX(matrix_perspective, const float*) {
    // N.B. unlike the matrix_NxM stages, this takes a row-major matrix.
    auto m = ctx;

    auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[1], m[2])),
         G = SkNf_fma(r,m[3], SkNf_fma(g,m[4], m[5])),
         Z = SkNf_fma(r,m[6], SkNf_fma(g,m[7], m[8]));
    r = R * Z.invert();
    g = G * Z.invert();
}
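// Spelled out for matrix_2x3 (the other matrix_NxM stages follow the same column-major
// convention): ctx points at a column-major 2x3 matrix, so the stage computes
//     | R |   | m[0] m[2] m[4] |   | r |
//     | G | = | m[1] m[3] m[5] | * | g |
//                                  | 1 |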

SI SkNf parametric(const SkNf& v, const SkColorSpaceTransferFn& p) {
    float result[N];   // Unconstrained powf() doesn't vectorize well...
    for (int i = 0; i < N; i++) {
        float s = v[i];
        result[i] = (s <= p.fD) ? p.fC * s + p.fF
                                : powf(s * p.fA + p.fB, p.fG) + p.fE;
    }
    // Clamp the output to [0, 1].
    // Max(NaN, 0) = 0, but Max(0, NaN) = NaN, so we want this exact order to ensure NaN => 0.
    return SkNf::Min(SkNf::Max(SkNf::Load(result), 0.0f), 1.0f);
}
STAGE_CTX(parametric_r, const SkColorSpaceTransferFn*) { r = parametric(r, *ctx); }
STAGE_CTX(parametric_g, const SkColorSpaceTransferFn*) { g = parametric(g, *ctx); }
STAGE_CTX(parametric_b, const SkColorSpaceTransferFn*) { b = parametric(b, *ctx); }
STAGE_CTX(parametric_a, const SkColorSpaceTransferFn*) { a = parametric(a, *ctx); }
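// For a concrete feel for the parameters (an illustrative example, not data read here):
// the sRGB transfer function fits this form with roughly G=2.4, A=1/1.055, B=0.055/1.055,
// C=1/12.92, D=0.04045, E=0, F=0, i.e.
//     s <= 0.04045 ? s * (1/12.92) : powf(s/1.055 + 0.055/1.055, 2.4)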

SI SkNf table(const SkNf& v, const SkTableTransferFn& table) {
    float result[N];
    for (int i = 0; i < N; i++) {
        result[i] = interp_lut(v[i], table.fData, table.fSize);
    }
    // No need to clamp - tables are by-design [0,1] -> [0,1].
    return SkNf::Load(result);
}
STAGE_CTX(table_r, const SkTableTransferFn*) { r = table(r, *ctx); }
STAGE_CTX(table_g, const SkTableTransferFn*) { g = table(g, *ctx); }
STAGE_CTX(table_b, const SkTableTransferFn*) { b = table(b, *ctx); }
STAGE_CTX(table_a, const SkTableTransferFn*) { a = table(a, *ctx); }

STAGE_CTX(color_lookup_table, const SkColorLookUpTable*) {
    const SkColorLookUpTable* colorLUT = ctx;
    SkASSERT(3 == colorLUT->inputChannels() || 4 == colorLUT->inputChannels());
    SkASSERT(3 == colorLUT->outputChannels());
    float result[3][N];
    for (int i = 0; i < N; ++i) {
        const float in[4] = { r[i], g[i], b[i], a[i] };
        float out[3];
        colorLUT->interp(out, in);
        for (int j = 0; j < colorLUT->outputChannels(); ++j) {
            result[j][i] = out[j];
        }
    }
    r = SkNf::Load(result[0]);
    g = SkNf::Load(result[1]);
    b = SkNf::Load(result[2]);
    if (4 == colorLUT->inputChannels()) {
        // We must set the pixel to opaque, as the alpha channel was used
        // as input before this.
        a = 1.f;
    }
}

STAGE(lab_to_xyz) {
    const auto lab_l = r * 100.0f;
    const auto lab_a = g * 255.0f - 128.0f;
    const auto lab_b = b * 255.0f - 128.0f;
    auto Y = (lab_l + 16.0f) * (1/116.0f);
    auto X = lab_a * (1/500.0f) + Y;
    auto Z = Y - (lab_b * (1/200.0f));

    const auto X3 = X*X*X;
    X = (X3 > 0.008856f).thenElse(X3, (X - (16/116.0f)) * (1/7.787f));
    const auto Y3 = Y*Y*Y;
    Y = (Y3 > 0.008856f).thenElse(Y3, (Y - (16/116.0f)) * (1/7.787f));
    const auto Z3 = Z*Z*Z;
    Z = (Z3 > 0.008856f).thenElse(Z3, (Z - (16/116.0f)) * (1/7.787f));

    // Adjust to D50 illuminant.
    X *= 0.96422f;
    Y *= 1.00000f;
    Z *= 0.82521f;

    r = X;
    g = Y;
    b = Z;
}

SI SkNf assert_in_tile(const SkNf& v, float limit) {
    for (int i = 0; i < N; i++) {
        SkASSERT(0 <= v[i] && v[i] < limit);
    }
    return v;
}

SI SkNf ulp_before(float v) {
    SkASSERT(v > 0);
    SkNf vs(v);
    SkNu uvs = SkNu::Load(&vs) - 1;
    return SkNf::Load(&uvs);
}

SI SkNf clamp(const SkNf& v, float limit) {
    SkNf result = SkNf::Max(0, SkNf::Min(v, ulp_before(limit)));
    return assert_in_tile(result, limit);
}
SI SkNf repeat(const SkNf& v, float limit) {
    SkNf result = v - (v/limit).floor()*limit;
    // For small negative v, (v/limit).floor()*limit can dominate v in the subtraction,
    // which leaves result == limit.  We want result < limit, so clamp it one ULP.
    result = SkNf::Min(result, ulp_before(limit));
    return assert_in_tile(result, limit);
}
SI SkNf mirror(const SkNf& v, float l/*imit*/) {
    SkNf result = ((v - l) - ((v - l) / (2*l)).floor()*(2*l) - l).abs();
    // Same deal as repeat.
    result = SkNf::Min(result, ulp_before(l));
    return assert_in_tile(result, l);
}
STAGE_CTX( clamp_x, const float*) { r = clamp (r, *ctx); }
STAGE_CTX(repeat_x, const float*) { r = repeat(r, *ctx); }
STAGE_CTX(mirror_x, const float*) { r = mirror(r, *ctx); }
STAGE_CTX( clamp_y, const float*) { g = clamp (g, *ctx); }
STAGE_CTX(repeat_y, const float*) { g = repeat(g, *ctx); }
STAGE_CTX(mirror_y, const float*) { g = mirror(g, *ctx); }
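// Worked examples of the tiling math above (illustrative), all with limit == 4:
//   clamp(5.0f, 4)    ~~> one ULP before 4.0f,  and clamp(-1.0f, 4) ~~> 0.0f;
//   repeat(-0.25f, 4) ~~> 3.75f   (-0.25 - floor(-0.0625)*4 == -0.25 + 4);
//   mirror(5.0f, 4)   ~~> 3.0f    (reflected back off the edge at 4).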

STAGE_CTX(save_xy, SkImageShaderContext*) {
    r.store(ctx->x);
    g.store(ctx->y);

    // Whether bilinear or bicubic, all sample points have the same fractional offset (fx,fy).
    // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
    // surrounding (x,y), all (0.5,0.5) off-center.
    auto fract = [](const SkNf& v) { return v - v.floor(); };
    fract(r + 0.5f).store(ctx->fx);
    fract(g + 0.5f).store(ctx->fy);
}

STAGE_CTX(accumulate, const SkImageShaderContext*) {
    // Bilinear and bicubic filtering are both separable, so we'll end up with independent
    // scale contributions in x and y that we multiply together to get each pixel's scale factor.
    auto scale = SkNf::Load(ctx->scalex) * SkNf::Load(ctx->scaley);
    dr = SkNf_fma(scale, r, dr);
    dg = SkNf_fma(scale, g, dg);
    db = SkNf_fma(scale, b, db);
    da = SkNf_fma(scale, a, da);
}

// In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
// are combined in direct proportion to their area overlapping that logical query pixel.
// At positive offsets, the x-axis contribution to that rectangular area is fx; (1-fx)
// at negative x offsets.  The y-axis is treated symmetrically.
template <int Scale>
SI void bilinear_x(SkImageShaderContext* ctx, SkNf* x) {
    *x = SkNf::Load(ctx->x) + Scale*0.5f;
    auto fx = SkNf::Load(ctx->fx);
    (Scale > 0 ? fx : (1.0f - fx)).store(ctx->scalex);
}
template <int Scale>
SI void bilinear_y(SkImageShaderContext* ctx, SkNf* y) {
    *y = SkNf::Load(ctx->y) + Scale*0.5f;
    auto fy = SkNf::Load(ctx->fy);
    (Scale > 0 ? fy : (1.0f - fy)).store(ctx->scaley);
}
STAGE_CTX(bilinear_nx, SkImageShaderContext*) { bilinear_x<-1>(ctx, &r); }
STAGE_CTX(bilinear_px, SkImageShaderContext*) { bilinear_x<+1>(ctx, &r); }
STAGE_CTX(bilinear_ny, SkImageShaderContext*) { bilinear_y<-1>(ctx, &g); }
STAGE_CTX(bilinear_py, SkImageShaderContext*) { bilinear_y<+1>(ctx, &g); }

// In bicubic interpolation, the 16 pixels at +/- 0.5 and +/- 1.5 offsets from the sample
// pixel center are combined with a non-uniform cubic filter, with high filter values near
// the center and lower values farther away.
//
// We break this filter function into two parts, one for near +/- 0.5 offsets,
// and one for far +/- 1.5 offsets.
//
// See GrBicubicEffect for details about this particular Mitchell-Netravali filter.
SI SkNf bicubic_near(const SkNf& t) {
    // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
    return SkNf_fma(t, SkNf_fma(t, SkNf_fma(-21/18.0f, t, 27/18.0f), 9/18.0f), 1/18.0f);
}
SI SkNf bicubic_far(const SkNf& t) {
    // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
    return (t*t)*SkNf_fma(7/18.0f, t, -6/18.0f);
}
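// A sanity check on those coefficients (verifiable by expanding the polynomials): for any t,
// bicubic_near(t) + bicubic_near(1-t) + bicubic_far(t) + bicubic_far(1-t) == 1, so the four
// weights along each axis sum to 1 and filtering a constant image reproduces that constant.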

template <int Scale>
SI void bicubic_x(SkImageShaderContext* ctx, SkNf* x) {
    *x = SkNf::Load(ctx->x) + Scale*0.5f;
    auto fx = SkNf::Load(ctx->fx);
    if (Scale == -3) { return bicubic_far (1.0f - fx).store(ctx->scalex); }
    if (Scale == -1) { return bicubic_near(1.0f - fx).store(ctx->scalex); }
    if (Scale == +1) { return bicubic_near(       fx).store(ctx->scalex); }
    if (Scale == +3) { return bicubic_far (       fx).store(ctx->scalex); }
    SkDEBUGFAIL("unreachable");
}
template <int Scale>
SI void bicubic_y(SkImageShaderContext* ctx, SkNf* y) {
    *y = SkNf::Load(ctx->y) + Scale*0.5f;
    auto fy = SkNf::Load(ctx->fy);
    if (Scale == -3) { return bicubic_far (1.0f - fy).store(ctx->scaley); }
    if (Scale == -1) { return bicubic_near(1.0f - fy).store(ctx->scaley); }
    if (Scale == +1) { return bicubic_near(       fy).store(ctx->scaley); }
    if (Scale == +3) { return bicubic_far (       fy).store(ctx->scaley); }
    SkDEBUGFAIL("unreachable");
}
STAGE_CTX(bicubic_n3x, SkImageShaderContext*) { bicubic_x<-3>(ctx, &r); }
STAGE_CTX(bicubic_n1x, SkImageShaderContext*) { bicubic_x<-1>(ctx, &r); }
STAGE_CTX(bicubic_p1x, SkImageShaderContext*) { bicubic_x<+1>(ctx, &r); }
STAGE_CTX(bicubic_p3x, SkImageShaderContext*) { bicubic_x<+3>(ctx, &r); }

STAGE_CTX(bicubic_n3y, SkImageShaderContext*) { bicubic_y<-3>(ctx, &g); }
STAGE_CTX(bicubic_n1y, SkImageShaderContext*) { bicubic_y<-1>(ctx, &g); }
STAGE_CTX(bicubic_p1y, SkImageShaderContext*) { bicubic_y<+1>(ctx, &g); }
STAGE_CTX(bicubic_p3y, SkImageShaderContext*) { bicubic_y<+3>(ctx, &g); }

template <typename T>
SI SkNi offset_and_ptr(T** ptr, const SkImageShaderContext* ctx, const SkNf& x, const SkNf& y) {
    SkNi ix = SkNx_cast<int>(x),
         iy = SkNx_cast<int>(y);
    SkNi offset = iy*ctx->stride + ix;

    *ptr = (const T*)ctx->pixels;
    return offset;
}

STAGE_CTX(gather_a8, const SkImageShaderContext*) {
    const uint8_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    r = g = b = 0.0f;
    a = SkNf_from_byte(gather(tail, p, offset));
}
STAGE_CTX(gather_i8, const SkImageShaderContext*) {
    const uint8_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    SkNi ix = SkNx_cast<int>(gather(tail, p, offset));
    from_8888(gather(tail, ctx->ctable->readColors(), ix), &r, &g, &b, &a);
}
STAGE_CTX(gather_g8, const SkImageShaderContext*) {
    const uint8_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    r = g = b = SkNf_from_byte(gather(tail, p, offset));
    a = 1.0f;
}
STAGE_CTX(gather_565, const SkImageShaderContext*) {
    const uint16_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    from_565(gather(tail, p, offset), &r, &g, &b);
    a = 1.0f;
}
STAGE_CTX(gather_4444, const SkImageShaderContext*) {
    const uint16_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    from_4444(gather(tail, p, offset), &r, &g, &b, &a);
}
STAGE_CTX(gather_8888, const SkImageShaderContext*) {
    const uint32_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    from_8888(gather(tail, p, offset), &r, &g, &b, &a);
}
STAGE_CTX(gather_f16, const SkImageShaderContext*) {
    const uint64_t* p;
    SkNi offset = offset_and_ptr(&p, ctx, r, g);

    auto px = gather(tail, p, offset);
    from_f16(&px, &r, &g, &b, &a);
}

STAGE_CTX(linear_gradient_2stops, const SkPM4f*) {
    auto t = r;
    SkPM4f c0 = ctx[0],
           dc = ctx[1];

    r = SkNf_fma(t, dc.r(), c0.r());
    g = SkNf_fma(t, dc.g(), c0.g());
    b = SkNf_fma(t, dc.b(), c0.b());
    a = SkNf_fma(t, dc.a(), c0.a());
}

STAGE_CTX(byte_tables, const void*) {
    struct Tables { const uint8_t *r, *g, *b, *a; };
    auto tables = (const Tables*)ctx;

    r = SkNf_from_byte(gather(tail, tables->r, SkNf_round(255.0f, r)));
    g = SkNf_from_byte(gather(tail, tables->g, SkNf_round(255.0f, g)));
    b = SkNf_from_byte(gather(tail, tables->b, SkNf_round(255.0f, b)));
    a = SkNf_from_byte(gather(tail, tables->a, SkNf_round(255.0f, a)));
}

SI Fn enum_to_Fn(SkRasterPipeline::StockStage st) {
    switch (st) {
    #define M(stage) case SkRasterPipeline::stage: return stage;
        SK_RASTER_PIPELINE_STAGES(M)
    #undef M
    }
    SkASSERT(false);
    return just_return;
}

namespace {

    static void build_program(void** program, const SkRasterPipeline::Stage* stages, int nstages) {
        for (int i = 0; i < nstages; i++) {
            *program++ = (void*)enum_to_Fn(stages[i].stage);
            if (stages[i].ctx) {
                *program++ = stages[i].ctx;
            }
        }
        *program++ = (void*)just_return;
    }

    static void run_program(void** program, size_t x, size_t n) {
        SkNf u;  // fastest to start uninitialized.

        auto start = (Fn)load_and_increment(&program);
        while (n >= N) {
            start(x*N, program, u,u,u,u, u,u,u,u);
            x += N;
            n -= N;
        }
        if (n) {
            start(x*N+n, program, u,u,u,u, u,u,u,u);
        }
    }

    // Compiled manages its memory manually because it's not safe to use
    // std::vector, SkTDArray, etc without setting us up for big ODR violations.
    struct Compiled {
        Compiled(const SkRasterPipeline::Stage* stages, int nstages) {
            int slots = nstages + 1;   // One extra for just_return.
            for (int i = 0; i < nstages; i++) {
                if (stages[i].ctx) {
                    slots++;
                }
            }
            fProgram = (void**)sk_malloc_throw(slots * sizeof(void*));
            build_program(fProgram, stages, nstages);
        }
        ~Compiled() { sk_free(fProgram); }

        Compiled(const Compiled& o) {
            int slots = 0;
            while (o.fProgram[slots++] != (void*)just_return);

            fProgram = (void**)sk_malloc_throw(slots * sizeof(void*));
            memcpy(fProgram, o.fProgram, slots * sizeof(void*));
        }

        void operator()(size_t x, size_t n) {
            run_program(fProgram, x, n);
        }

        void** fProgram;
    };
}

namespace SK_OPTS_NS {

    SI std::function<void(size_t, size_t)>
    compile_pipeline(const SkRasterPipeline::Stage* stages, int nstages) {
        return Compiled{stages,nstages};
    }

    SI void run_pipeline(size_t x, size_t n,
                         const SkRasterPipeline::Stage* stages, int nstages) {
        static const int kStackMax = 256;
        // Worst case is nstages stages with nstages context pointers, and just_return.
        if (2*nstages+1 <= kStackMax) {
            void* program[kStackMax];
            build_program(program, stages, nstages);
            run_program(program, x,n);
        } else {
            Compiled{stages,nstages}(x,n);
        }
    }
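
    // A sketch of how a caller might assemble and run a program (illustrative only;
    // the real call sites live in SkRasterPipeline and its blitters, and this assumes
    // Stage is the { StockStage stage; void* ctx; } pair that build_program() reads).
    // The stages mirror the example program in the comment at the top of this file:
    //
    //     SkRasterPipeline::Stage stages[] = {
    //         { SkRasterPipeline::load_8888,    &src     },  // src pixels -> r,g,b,a
    //         { SkRasterPipeline::from_srgb,    nullptr  },
    //         { SkRasterPipeline::move_src_dst, nullptr  },
    //         { SkRasterPipeline::load_f16,     &dst     },
    //         { SkRasterPipeline::swap,         nullptr  },
    //         { SkRasterPipeline::srcover,      nullptr  },
    //         { SkRasterPipeline::store_f16,    &dst     },
    //     };
    //     run_pipeline(0, n, stages, SK_ARRAY_COUNT(stages));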

}  // namespace SK_OPTS_NS

#undef SI
#undef STAGE
#undef STAGE_CTX
#undef RGBA_XFERMODE
#undef RGB_XFERMODE

#endif//SkRasterPipeline_opts_DEFINED