/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRasterPipeline_opts_DEFINED
#define SkRasterPipeline_opts_DEFINED

#include "include/core/SkTypes.h"
#include "src/core/SkUtils.h"  // unaligned_{load,store}
#include "src/sksl/SkSLByteCode.h"

// Every function in this file should be marked static and inline using SI.
#if defined(__clang__)
    #define SI __attribute__((always_inline)) static inline
#else
    #define SI static inline
#endif

template <typename Dst, typename Src>
SI Dst bit_cast(const Src& src) {
    static_assert(sizeof(Dst) == sizeof(Src), "");
    return sk_unaligned_load<Dst>(&src);
}

template <typename Dst, typename Src>
SI Dst widen_cast(const Src& src) {
    static_assert(sizeof(Dst) > sizeof(Src), "");
    Dst dst;
    memcpy(&dst, &src, sizeof(Src));
    return dst;
}
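
// Illustrative values (not part of the original source): bit_cast<uint32_t>(1.0f)
// reinterprets the bits and yields 0x3f800000, while widen_cast<__m128i>() of an
// 8-byte vector copies it into the low half of a 16-byte register and leaves the
// upper bytes uninitialized.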

// Our program is an array of void*, either
// - 1 void* per stage with no context pointer, the next stage;
// - 2 void* per stage with a context pointer, first the context pointer, then the next stage.

// load_and_inc() steps the program forward by 1 void*, returning that pointer.
SI void* load_and_inc(void**& program) {
#if defined(__GNUC__) && defined(__x86_64__)
    // If program is in %rsi (we try to make this likely) then this is a single instruction.
    void* rax;
    asm("lodsq" : "=a"(rax), "+S"(program));  // Write-only %rax, read-write %rsi.
    return rax;
#else
    // On ARM *program++ compiles into pretty ideal code without any handholding.
    return *program++;
#endif
}
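
// A hypothetical program, for illustration only: a pipeline that loads pixels,
// swaps red/blue, and stores them might be laid out as
//   { (void*)load, &load_ctx, (void*)swap_rb, (void*)store, &store_ctx, (void*)just_return }
// where each stage consumes its context pointer (if any) and then the next stage's pointer.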

// Lazily resolved on first cast.  Does nothing if cast to Ctx::None.
struct Ctx {
    struct None {};

    void*   ptr;
    void**& program;

    explicit Ctx(void**& p) : ptr(nullptr), program(p) {}

    template <typename T>
    operator T*() {
        if (!ptr) { ptr = load_and_inc(program); }
        return (T*)ptr;
    }
    operator None() { return None{}; }
};
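
// Sketch of how a stage uses this (hypothetical usage, shown only for illustration):
// the first pointer cast lazily pulls the stage's context out of the program stream,
//   const SkRasterPipeline_MemoryCtx* ctx = Ctx{program};
// while casting to Ctx::None consumes nothing from the program.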


#if !defined(__clang__)
    #define JUMPER_IS_SCALAR
#elif defined(SK_ARM_HAS_NEON)
    #define JUMPER_IS_NEON
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX512
    #define JUMPER_IS_AVX512
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    #define JUMPER_IS_HSW
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
    #define JUMPER_IS_AVX
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    #define JUMPER_IS_SSE41
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
    #define JUMPER_IS_SSE2
#else
    #define JUMPER_IS_SCALAR
#endif

// Older Clangs seem to crash when generating non-optimized NEON code for ARMv7.
#if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32)
    // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative.
    #if defined(__apple_build_version__) && __clang_major__ < 9
        #define JUMPER_IS_SCALAR
    #elif __clang_major__ < 5
        #define JUMPER_IS_SCALAR
    #endif

    #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR)
        #undef JUMPER_IS_NEON
    #endif
#endif

#if defined(JUMPER_IS_SCALAR)
    #include <math.h>
#elif defined(JUMPER_IS_NEON)
    #include <arm_neon.h>
#else
    #include <immintrin.h>
#endif

namespace SK_OPTS_NS {

#if defined(JUMPER_IS_SCALAR)
    // This path should lead to portable scalar code.
    using F   = float   ;
    using I32 =  int32_t;
    using U64 = uint64_t;
    using U32 = uint32_t;
    using U16 = uint16_t;
    using U8  = uint8_t ;

    SI F   mad(F f, F m, F a)   { return f*m+a; }
    SI F   min(F a, F b)        { return fminf(a,b); }
    SI F   max(F a, F b)        { return fmaxf(a,b); }
    SI F   abs_  (F v)          { return fabsf(v); }
    SI F   floor_(F v)          { return floorf(v); }
    SI F   rcp   (F v)          { return 1.0f / v; }
    SI F   rsqrt (F v)          { return 1.0f / sqrtf(v); }
    SI F   sqrt_ (F v)          { return sqrtf(v); }
    SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
    SI U16 pack(U32 v)          { return (U16)v; }
    SI U8  pack(U16 v)          { return  (U8)v; }

    SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }

    template <typename T>
    SI T gather(const T* p, U32 ix) { return p[ix]; }

    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
        *r = ptr[0];
        *g = ptr[1];
    }
    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
        ptr[0] = r;
        ptr[1] = g;
    }
    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
        *r = ptr[0];
        *g = ptr[1];
        *b = ptr[2];
    }
    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        *r = ptr[0];
        *g = ptr[1];
        *b = ptr[2];
        *a = ptr[3];
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        ptr[0] = r;
        ptr[1] = g;
        ptr[2] = b;
        ptr[3] = a;
    }

    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
        *r = ptr[0];
        *g = ptr[1];
    }
    SI void store2(float* ptr, size_t tail, F r, F g) {
        ptr[0] = r;
        ptr[1] = g;
    }
    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        *r = ptr[0];
        *g = ptr[1];
        *b = ptr[2];
        *a = ptr[3];
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        ptr[0] = r;
        ptr[1] = g;
        ptr[2] = b;
        ptr[3] = a;
    }

#elif defined(JUMPER_IS_NEON)
    // Since we know we're using Clang, we can use its vector extensions.
    template <typename T> using V = T __attribute__((ext_vector_type(4)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    // We polyfill a few routines that Clang doesn't build into ext_vector_types.
    SI F   min(F a, F b)                 { return vminq_f32(a,b); }
    SI F   max(F a, F b)                 { return vmaxq_f32(a,b); }
    SI F   abs_ (F v)                    { return vabsq_f32(v); }
    SI F   rcp  (F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e  ) * e; }
    SI F   rsqrt(F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
    SI U16 pack(U32 v)                   { return __builtin_convertvector(v, U16); }
    SI U8  pack(U16 v)                   { return __builtin_convertvector(v,  U8); }

    SI F if_then_else(I32 c, F t, F e) { return vbslq_f32((U32)c,t,e); }

    #if defined(SK_CPU_ARM64)
        SI F   mad(F f, F m, F a)  { return vfmaq_f32(a,f,m); }
        SI F   floor_(F v)         { return vrndmq_f32(v); }
        SI F   sqrt_(F v)          { return vsqrtq_f32(v); }
        SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
    #else
        SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); }
        SI F floor_(F v) {
            F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
            return roundtrip - if_then_else(roundtrip > v, 1, 0);
        }

        SI F sqrt_(F v) {
            auto e = vrsqrteq_f32(v);  // Estimate and two refinement steps for e = rsqrt(v).
            e *= vrsqrtsq_f32(v,e*e);
            e *= vrsqrtsq_f32(v,e*e);
            return v*e;                // sqrt(v) == v*rsqrt(v).
        }

        SI U32 round(F v, F scale) {
            return vcvtq_u32_f32(mad(v,scale,0.5f));
        }
    #endif


    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
    }
    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
        uint16x4x2_t rg;
        if (__builtin_expect(tail,0)) {
            if (  true  ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
            if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
            if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
        } else {
            rg = vld2_u16(ptr);
        }
        *r = rg.val[0];
        *g = rg.val[1];
    }
    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
        if (__builtin_expect(tail,0)) {
            if (  true  ) { vst2_lane_u16(ptr + 0, (uint16x4x2_t{{r,g}}), 0); }
            if (tail > 1) { vst2_lane_u16(ptr + 2, (uint16x4x2_t{{r,g}}), 1); }
            if (tail > 2) { vst2_lane_u16(ptr + 4, (uint16x4x2_t{{r,g}}), 2); }
        } else {
            vst2_u16(ptr, (uint16x4x2_t{{r,g}}));
        }
    }
    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
        uint16x4x3_t rgb;
        if (__builtin_expect(tail,0)) {
            if (  true  ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
            if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
            if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
        } else {
            rgb = vld3_u16(ptr);
        }
        *r = rgb.val[0];
        *g = rgb.val[1];
        *b = rgb.val[2];
    }
    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        uint16x4x4_t rgba;
        if (__builtin_expect(tail,0)) {
            if (  true  ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
            if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
            if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
        } else {
            rgba = vld4_u16(ptr);
        }
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }

    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        if (__builtin_expect(tail,0)) {
            if (  true  ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
            if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
            if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
        } else {
            vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
        }
    }
    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
        float32x4x2_t rg;
        if (__builtin_expect(tail,0)) {
            if (  true  ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
            if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
            if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
        } else {
            rg = vld2q_f32(ptr);
        }
        *r = rg.val[0];
        *g = rg.val[1];
    }
    SI void store2(float* ptr, size_t tail, F r, F g) {
        if (__builtin_expect(tail,0)) {
            if (  true  ) { vst2q_lane_f32(ptr + 0, (float32x4x2_t{{r,g}}), 0); }
            if (tail > 1) { vst2q_lane_f32(ptr + 2, (float32x4x2_t{{r,g}}), 1); }
            if (tail > 2) { vst2q_lane_f32(ptr + 4, (float32x4x2_t{{r,g}}), 2); }
        } else {
            vst2q_f32(ptr, (float32x4x2_t{{r,g}}));
        }
    }
    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        float32x4x4_t rgba;
        if (__builtin_expect(tail,0)) {
            if (  true  ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
            if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
            if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
        } else {
            rgba = vld4q_f32(ptr);
        }
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        if (__builtin_expect(tail,0)) {
            if (  true  ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
            if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
            if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
        } else {
            vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
        }
    }

#elif defined(JUMPER_IS_AVX) || defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
    // These are __m256 and __m256i, but friendlier and strongly-typed.
    template <typename T> using V = T __attribute__((ext_vector_type(8)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    SI F mad(F f, F m, F a)  {
    #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
        return _mm256_fmadd_ps(f,m,a);
    #else
        return f*m+a;
    #endif
    }

    SI F   min(F a, F b)        { return _mm256_min_ps(a,b);    }
    SI F   max(F a, F b)        { return _mm256_max_ps(a,b);    }
    SI F   abs_  (F v)          { return _mm256_and_ps(v, 0-v); }
    SI F   floor_(F v)          { return _mm256_floor_ps(v);    }
    SI F   rcp   (F v)          { return _mm256_rcp_ps  (v);    }
    SI F   rsqrt (F v)          { return _mm256_rsqrt_ps(v);    }
    SI F   sqrt_ (F v)          { return _mm256_sqrt_ps (v);    }
    SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }

    SI U16 pack(U32 v) {
        return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
                                _mm256_extractf128_si256(v, 1));
    }
    SI U8 pack(U16 v) {
        auto r = _mm_packus_epi16(v,v);
        return sk_unaligned_load<U8>(&r);
    }

    SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e,t,c); }

    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
                 p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
    }
    #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
        SI F   gather(const float*    p, U32 ix) { return _mm256_i32gather_ps   (p, ix, 4); }
        SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32(p, ix, 4); }
        SI U64 gather(const uint64_t* p, U32 ix) {
            __m256i parts[] = {
                _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,0), 8),
                _mm256_i32gather_epi64(p, _mm256_extracti128_si256(ix,1), 8),
            };
            return bit_cast<U64>(parts);
        }
    #endif

    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
        U16 _0123, _4567;
        if (__builtin_expect(tail,0)) {
            _0123 = _4567 = _mm_setzero_si128();
            auto* d = &_0123;
            if (tail > 3) {
                *d = _mm_loadu_si128(((__m128i*)ptr) + 0);
                tail -= 4;
                ptr += 8;
                d = &_4567;
            }
            bool high = false;
            if (tail > 1) {
                *d = _mm_loadu_si64(ptr);
                tail -= 2;
                ptr += 4;
                high = true;
            }
            if (tail > 0) {
                (*d)[high ? 4 : 0] = *(ptr + 0);
                (*d)[high ? 5 : 1] = *(ptr + 1);
            }
        } else {
            _0123 = _mm_loadu_si128(((__m128i*)ptr) + 0);
            _4567 = _mm_loadu_si128(((__m128i*)ptr) + 1);
        }
        *r = _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16),
                             _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16));
        *g = _mm_packs_epi32(_mm_srai_epi32(_0123, 16),
                             _mm_srai_epi32(_4567, 16));
    }
    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
        auto _0123 = _mm_unpacklo_epi16(r, g),
             _4567 = _mm_unpackhi_epi16(r, g);
        if (__builtin_expect(tail,0)) {
            const auto* s = &_0123;
            if (tail > 3) {
                _mm_storeu_si128((__m128i*)ptr, *s);
                s = &_4567;
                tail -= 4;
                ptr += 8;
            }
            bool high = false;
            if (tail > 1) {
                _mm_storel_epi64((__m128i*)ptr, *s);
                ptr += 4;
                tail -= 2;
                high = true;
            }
            if (tail > 0) {
                if (high) {
                    *(int32_t*)ptr = _mm_extract_epi32(*s, 2);
                } else {
                    *(int32_t*)ptr = _mm_cvtsi128_si32(*s);
                }
            }
        } else {
            _mm_storeu_si128((__m128i*)ptr + 0, _0123);
            _mm_storeu_si128((__m128i*)ptr + 1, _4567);
        }
    }

    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
        __m128i _0,_1,_2,_3,_4,_5,_6,_7;
        if (__builtin_expect(tail,0)) {
            auto load_rgb = [](const uint16_t* src) {
                auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
                return _mm_insert_epi16(v, src[2], 2);
            };
            _1 = _2 = _3 = _4 = _5 = _6 = _7 = _mm_setzero_si128();
            if (  true  ) { _0 = load_rgb(ptr +  0); }
            if (tail > 1) { _1 = load_rgb(ptr +  3); }
            if (tail > 2) { _2 = load_rgb(ptr +  6); }
            if (tail > 3) { _3 = load_rgb(ptr +  9); }
            if (tail > 4) { _4 = load_rgb(ptr + 12); }
            if (tail > 5) { _5 = load_rgb(ptr + 15); }
            if (tail > 6) { _6 = load_rgb(ptr + 18); }
        } else {
            // Load 0+1, 2+3, 4+5 normally, and 6+7 backed up 4 bytes so we don't run over.
            auto _01 =                _mm_loadu_si128((const __m128i*)(ptr +  0))    ;
            auto _23 =                _mm_loadu_si128((const __m128i*)(ptr +  6))    ;
            auto _45 =                _mm_loadu_si128((const __m128i*)(ptr + 12))    ;
            auto _67 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 16)), 4);
            _0 = _01; _1 = _mm_srli_si128(_01, 6);
            _2 = _23; _3 = _mm_srli_si128(_23, 6);
            _4 = _45; _5 = _mm_srli_si128(_45, 6);
            _6 = _67; _7 = _mm_srli_si128(_67, 6);
        }

        auto _02 = _mm_unpacklo_epi16(_0, _2),  // r0 r2 g0 g2 b0 b2 xx xx
             _13 = _mm_unpacklo_epi16(_1, _3),
             _46 = _mm_unpacklo_epi16(_4, _6),
             _57 = _mm_unpacklo_epi16(_5, _7);

        auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
             bx0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 xx xx xx xx
             rg4567 = _mm_unpacklo_epi16(_46, _57),
             bx4567 = _mm_unpackhi_epi16(_46, _57);

        *r = _mm_unpacklo_epi64(rg0123, rg4567);
        *g = _mm_unpackhi_epi64(rg0123, rg4567);
        *b = _mm_unpacklo_epi64(bx0123, bx4567);
    }
    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        __m128i _01, _23, _45, _67;
        if (__builtin_expect(tail,0)) {
            auto src = (const double*)ptr;
            _01 = _23 = _45 = _67 = _mm_setzero_si128();
            if (tail > 0) { _01 = _mm_loadl_pd(_01, src+0); }
            if (tail > 1) { _01 = _mm_loadh_pd(_01, src+1); }
            if (tail > 2) { _23 = _mm_loadl_pd(_23, src+2); }
            if (tail > 3) { _23 = _mm_loadh_pd(_23, src+3); }
            if (tail > 4) { _45 = _mm_loadl_pd(_45, src+4); }
            if (tail > 5) { _45 = _mm_loadh_pd(_45, src+5); }
            if (tail > 6) { _67 = _mm_loadl_pd(_67, src+6); }
        } else {
            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
            _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
            _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
            _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
        }

        auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
             _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
             _46 = _mm_unpacklo_epi16(_45, _67),
             _57 = _mm_unpackhi_epi16(_45, _67);

        auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
             ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
             rg4567 = _mm_unpacklo_epi16(_46, _57),
             ba4567 = _mm_unpackhi_epi16(_46, _57);

        *r = _mm_unpacklo_epi64(rg0123, rg4567);
        *g = _mm_unpackhi_epi64(rg0123, rg4567);
        *b = _mm_unpacklo_epi64(ba0123, ba4567);
        *a = _mm_unpackhi_epi64(ba0123, ba4567);
    }
    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        auto rg0123 = _mm_unpacklo_epi16(r, g),  // r0 g0 r1 g1 r2 g2 r3 g3
             rg4567 = _mm_unpackhi_epi16(r, g),  // r4 g4 r5 g5 r6 g6 r7 g7
             ba0123 = _mm_unpacklo_epi16(b, a),
             ba4567 = _mm_unpackhi_epi16(b, a);

        auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
             _23 = _mm_unpackhi_epi32(rg0123, ba0123),
             _45 = _mm_unpacklo_epi32(rg4567, ba4567),
             _67 = _mm_unpackhi_epi32(rg4567, ba4567);

        if (__builtin_expect(tail,0)) {
            auto dst = (double*)ptr;
            if (tail > 0) { _mm_storel_pd(dst+0, _01); }
            if (tail > 1) { _mm_storeh_pd(dst+1, _01); }
            if (tail > 2) { _mm_storel_pd(dst+2, _23); }
            if (tail > 3) { _mm_storeh_pd(dst+3, _23); }
            if (tail > 4) { _mm_storel_pd(dst+4, _45); }
            if (tail > 5) { _mm_storeh_pd(dst+5, _45); }
            if (tail > 6) { _mm_storel_pd(dst+6, _67); }
        } else {
            _mm_storeu_si128((__m128i*)ptr + 0, _01);
            _mm_storeu_si128((__m128i*)ptr + 1, _23);
            _mm_storeu_si128((__m128i*)ptr + 2, _45);
            _mm_storeu_si128((__m128i*)ptr + 3, _67);
        }
    }

    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
        F _0123, _4567;
        if (__builtin_expect(tail, 0)) {
            _0123 = _4567 = _mm256_setzero_ps();
            F* d = &_0123;
            if (tail > 3) {
                *d = _mm256_loadu_ps(ptr);
                ptr += 8;
                tail -= 4;
                d = &_4567;
            }
            bool high = false;
            if (tail > 1) {
                *d = _mm256_castps128_ps256(_mm_loadu_ps(ptr));
                ptr += 4;
                tail -= 2;
                high = true;
            }
            if (tail > 0) {
                *d = high ? _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 1)
                          : _mm256_insertf128_ps(*d, _mm_loadu_si64(ptr), 0);
            }
        } else {
            _0123 = _mm256_loadu_ps(ptr + 0);
            _4567 = _mm256_loadu_ps(ptr + 8);
        }

        F _0145 = _mm256_permute2f128_pd(_0123, _4567, 0x20),
          _2367 = _mm256_permute2f128_pd(_0123, _4567, 0x31);

        *r = _mm256_shuffle_ps(_0145, _2367, 0x88);
        *g = _mm256_shuffle_ps(_0145, _2367, 0xDD);
    }
    SI void store2(float* ptr, size_t tail, F r, F g) {
        F _0145 = _mm256_unpacklo_ps(r, g),
          _2367 = _mm256_unpackhi_ps(r, g);
        F _0123 = _mm256_permute2f128_pd(_0145, _2367, 0x20),
          _4567 = _mm256_permute2f128_pd(_0145, _2367, 0x31);

        if (__builtin_expect(tail, 0)) {
            const __m256* s = &_0123;
            if (tail > 3) {
                _mm256_storeu_ps(ptr, *s);
                s = &_4567;
                tail -= 4;
                ptr += 8;
            }
            bool high = false;
            if (tail > 1) {
                _mm_storeu_ps(ptr, _mm256_extractf128_ps(*s, 0));
                ptr += 4;
                tail -= 2;
                high = true;
            }
            if (tail > 0) {
                *(ptr + 0) = (*s)[ high ? 4 : 0];
                *(ptr + 1) = (*s)[ high ? 5 : 1];
            }
        } else {
            _mm256_storeu_ps(ptr + 0, _0123);
            _mm256_storeu_ps(ptr + 8, _4567);
        }
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        F _04, _15, _26, _37;
        _04 = _15 = _26 = _37 = 0;
        switch (tail) {
            case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1);
            case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1);
            case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1);
            case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1);
            case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0);
            case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0);
            case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0);
            case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
        }

        F rg0145 = _mm256_unpacklo_ps(_04,_15),  // r0 r1 g0 g1 | r4 r5 g4 g5
          ba0145 = _mm256_unpackhi_ps(_04,_15),
          rg2367 = _mm256_unpacklo_ps(_26,_37),
          ba2367 = _mm256_unpackhi_ps(_26,_37);

        *r = _mm256_unpacklo_pd(rg0145, rg2367);
        *g = _mm256_unpackhi_pd(rg0145, rg2367);
        *b = _mm256_unpacklo_pd(ba0145, ba2367);
        *a = _mm256_unpackhi_pd(ba0145, ba2367);
    }
    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        F rg0145 = _mm256_unpacklo_ps(r, g),  // r0 g0 r1 g1 | r4 g4 r5 g5
          rg2367 = _mm256_unpackhi_ps(r, g),  // r2 ...      | r6 ...
          ba0145 = _mm256_unpacklo_ps(b, a),  // b0 a0 b1 a1 | b4 a4 b5 a5
          ba2367 = _mm256_unpackhi_ps(b, a);  // b2 ...      | b6 ...

        F _04 = _mm256_unpacklo_pd(rg0145, ba0145),  // r0 g0 b0 a0 | r4 g4 b4 a4
          _15 = _mm256_unpackhi_pd(rg0145, ba0145),  // r1 ...      | r5 ...
          _26 = _mm256_unpacklo_pd(rg2367, ba2367),  // r2 ...      | r6 ...
          _37 = _mm256_unpackhi_pd(rg2367, ba2367);  // r3 ...      | r7 ...

        if (__builtin_expect(tail, 0)) {
            if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
            if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
            if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
            if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
            if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
            if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
            if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
        } else {
            F _01 = _mm256_permute2f128_ps(_04, _15, 32),  // 32 == 0010 0000 == lo, lo
              _23 = _mm256_permute2f128_ps(_26, _37, 32),
              _45 = _mm256_permute2f128_ps(_04, _15, 49),  // 49 == 0011 0001 == hi, hi
              _67 = _mm256_permute2f128_ps(_26, _37, 49);
            _mm256_storeu_ps(ptr+ 0, _01);
            _mm256_storeu_ps(ptr+ 8, _23);
            _mm256_storeu_ps(ptr+16, _45);
            _mm256_storeu_ps(ptr+24, _67);
        }
    }

#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
    template <typename T> using V = T __attribute__((ext_vector_type(4)));
    using F   = V<float   >;
    using I32 = V< int32_t>;
    using U64 = V<uint64_t>;
    using U32 = V<uint32_t>;
    using U16 = V<uint16_t>;
    using U8  = V<uint8_t >;

    SI F   mad(F f, F m, F a)  { return f*m+a;              }
    SI F   min(F a, F b)       { return _mm_min_ps(a,b);    }
    SI F   max(F a, F b)       { return _mm_max_ps(a,b);    }
    SI F   abs_(F v)           { return _mm_and_ps(v, 0-v); }
    SI F   rcp  (F v)          { return _mm_rcp_ps  (v);    }
    SI F   rsqrt(F v)          { return _mm_rsqrt_ps(v);    }
    SI F   sqrt_(F v)          { return _mm_sqrt_ps (v);    }
    SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }

    SI U16 pack(U32 v) {
    #if defined(JUMPER_IS_SSE41)
        auto p = _mm_packus_epi32(v,v);
    #else
        // Sign extend so that _mm_packs_epi32() does the pack we want.
        auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
        p = _mm_packs_epi32(p,p);
    #endif
        return sk_unaligned_load<U16>(&p);  // We have two copies.  Return (the lower) one.
    }
    SI U8 pack(U16 v) {
        auto r = widen_cast<__m128i>(v);
        r = _mm_packus_epi16(r,r);
        return sk_unaligned_load<U8>(&r);
    }

    SI F if_then_else(I32 c, F t, F e) {
        return _mm_or_ps(_mm_and_ps(c, t), _mm_andnot_ps(c, e));
    }

    SI F floor_(F v) {
    #if defined(JUMPER_IS_SSE41)
        return _mm_floor_ps(v);
    #else
        F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
        return roundtrip - if_then_else(roundtrip > v, 1, 0);
    #endif
    }

    template <typename T>
    SI V<T> gather(const T* p, U32 ix) {
        return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
    }

    // TODO: these loads and stores are incredibly difficult to follow.

    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
        __m128i _01;
        if (__builtin_expect(tail,0)) {
            _01 = _mm_setzero_si128();
            if (tail > 1) {
                _01 = _mm_loadl_pd(_01, (const double*)ptr);     // r0 g0 r1 g1 00 00 00 00
                if (tail > 2) {
                    _01 = _mm_insert_epi16(_01, *(ptr+4), 4);    // r0 g0 r1 g1 r2 00 00 00
                    _01 = _mm_insert_epi16(_01, *(ptr+5), 5);    // r0 g0 r1 g1 r2 g2 00 00
                }
            } else {
                _01 = _mm_cvtsi32_si128(*(const uint32_t*)ptr);  // r0 g0 00 00 00 00 00 00
            }
        } else {
            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);          // r0 g0 r1 g1 r2 g2 r3 g3
        }
        auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8);      // r0 r1 g0 g1 r2 g2 r3 g3
        auto rg      = _mm_shufflehi_epi16(rg01_23, 0xD8);  // r0 r1 g0 g1 r2 r3 g2 g3

        auto R = _mm_shuffle_epi32(rg, 0x88);  // r0 r1 r2 r3 r0 r1 r2 r3
        auto G = _mm_shuffle_epi32(rg, 0xDD);  // g0 g1 g2 g3 g0 g1 g2 g3
        *r = sk_unaligned_load<U16>(&R);
        *g = sk_unaligned_load<U16>(&G);
    }
    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
        U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
        if (__builtin_expect(tail, 0)) {
            if (tail > 1) {
                _mm_storel_epi64((__m128i*)ptr, rg);
                if (tail > 2) {
                    int32_t rgpair = rg[2];
                    memcpy(ptr + 4, &rgpair, sizeof(rgpair));
                }
            } else {
                int32_t rgpair = rg[0];
                memcpy(ptr, &rgpair, sizeof(rgpair));
            }
        } else {
            _mm_storeu_si128((__m128i*)ptr + 0, rg);
        }
    }

    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
        __m128i _0, _1, _2, _3;
        if (__builtin_expect(tail,0)) {
            _1 = _2 = _3 = _mm_setzero_si128();
            auto load_rgb = [](const uint16_t* src) {
                auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
                return _mm_insert_epi16(v, src[2], 2);
            };
            if (  true  ) { _0 = load_rgb(ptr + 0); }
            if (tail > 1) { _1 = load_rgb(ptr + 3); }
            if (tail > 2) { _2 = load_rgb(ptr + 6); }
        } else {
            // Load slightly weirdly to make sure we don't load past the end of 4x48 bits.
            auto _01 =                _mm_loadu_si128((const __m128i*)(ptr + 0))    ,
                 _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4);

            // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored).
            _0 = _01;
            _1 = _mm_srli_si128(_01, 6);
            _2 = _23;
            _3 = _mm_srli_si128(_23, 6);
        }

        // De-interlace to R,G,B.
        auto _02 = _mm_unpacklo_epi16(_0, _2),  // r0 r2 g0 g2 b0 b2 xx xx
             _13 = _mm_unpacklo_epi16(_1, _3);  // r1 r3 g1 g3 b1 b3 xx xx

        auto R = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
             G = _mm_srli_si128(R, 8),
             B = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 xx xx xx xx

        *r = sk_unaligned_load<U16>(&R);
        *g = sk_unaligned_load<U16>(&G);
        *b = sk_unaligned_load<U16>(&B);
    }

    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
        __m128i _01, _23;
        if (__builtin_expect(tail,0)) {
            _01 = _23 = _mm_setzero_si128();
            auto src = (const double*)ptr;
            if (  true  ) { _01 = _mm_loadl_pd(_01, src + 0); }  // r0 g0 b0 a0 00 00 00 00
            if (tail > 1) { _01 = _mm_loadh_pd(_01, src + 1); }  // r0 g0 b0 a0 r1 g1 b1 a1
            if (tail > 2) { _23 = _mm_loadl_pd(_23, src + 2); }  // r2 g2 b2 a2 00 00 00 00
        } else {
            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);  // r0 g0 b0 a0 r1 g1 b1 a1
            _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);  // r2 g2 b2 a2 r3 g3 b3 a3
        }

        auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
             _13 = _mm_unpackhi_epi16(_01, _23);  // r1 r3 g1 g3 b1 b3 a1 a3

        auto rg = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
             ba = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 a0 a1 a2 a3

        *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
        *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
        *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0);
        *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4);
    }

    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
        auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
             ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));

        if (__builtin_expect(tail, 0)) {
            auto dst = (double*)ptr;
            if (  true  ) { _mm_storel_pd(dst + 0, _mm_unpacklo_epi32(rg, ba)); }
            if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_unpacklo_epi32(rg, ba)); }
            if (tail > 2) { _mm_storel_pd(dst + 2, _mm_unpackhi_epi32(rg, ba)); }
        } else {
            _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
            _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
        }
    }

    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
        F _01, _23;
        if (__builtin_expect(tail, 0)) {
            _01 = _23 = _mm_setzero_si128();
            if (  true  ) { _01 = _mm_loadl_pi(_01, (__m64 const*)(ptr + 0)); }
            if (tail > 1) { _01 = _mm_loadh_pi(_01, (__m64 const*)(ptr + 2)); }
            if (tail > 2) { _23 = _mm_loadl_pi(_23, (__m64 const*)(ptr + 4)); }
        } else {
            _01 = _mm_loadu_ps(ptr + 0);
            _23 = _mm_loadu_ps(ptr + 4);
        }
        *r = _mm_shuffle_ps(_01, _23, 0x88);
        *g = _mm_shuffle_ps(_01, _23, 0xDD);
    }
    SI void store2(float* ptr, size_t tail, F r, F g) {
        F _01 = _mm_unpacklo_ps(r, g),
          _23 = _mm_unpackhi_ps(r, g);
        if (__builtin_expect(tail, 0)) {
            if (  true  ) { _mm_storel_pi((__m64*)(ptr + 0), _01); }
            if (tail > 1) { _mm_storeh_pi((__m64*)(ptr + 2), _01); }
            if (tail > 2) { _mm_storel_pi((__m64*)(ptr + 4), _23); }
        } else {
            _mm_storeu_ps(ptr + 0, _01);
            _mm_storeu_ps(ptr + 4, _23);
        }
    }

    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
        F _0, _1, _2, _3;
        if (__builtin_expect(tail, 0)) {
            _1 = _2 = _3 = _mm_setzero_si128();
            if (  true  ) { _0 = _mm_loadu_ps(ptr + 0); }
            if (tail > 1) { _1 = _mm_loadu_ps(ptr + 4); }
            if (tail > 2) { _2 = _mm_loadu_ps(ptr + 8); }
        } else {
            _0 = _mm_loadu_ps(ptr + 0);
            _1 = _mm_loadu_ps(ptr + 4);
            _2 = _mm_loadu_ps(ptr + 8);
            _3 = _mm_loadu_ps(ptr +12);
        }
        _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
        *r = _0;
        *g = _1;
        *b = _2;
        *a = _3;
    }

    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
        _MM_TRANSPOSE4_PS(r,g,b,a);
        if (__builtin_expect(tail, 0)) {
            if (  true  ) { _mm_storeu_ps(ptr + 0, r); }
            if (tail > 1) { _mm_storeu_ps(ptr + 4, g); }
            if (tail > 2) { _mm_storeu_ps(ptr + 8, b); }
        } else {
            _mm_storeu_ps(ptr + 0, r);
            _mm_storeu_ps(ptr + 4, g);
            _mm_storeu_ps(ptr + 8, b);
            _mm_storeu_ps(ptr +12, a);
        }
    }
#endif

// We need to be careful with casts.
// (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
// These named casts and bit_cast() are always what they seem to be.
#if defined(JUMPER_IS_SCALAR)
    SI F   cast  (U32 v) { return   (F)v; }
    SI F   cast64(U64 v) { return   (F)v; }
    SI U32 trunc_(F   v) { return (U32)v; }
    SI U32 expand(U16 v) { return (U32)v; }
    SI U32 expand(U8  v) { return (U32)v; }
#else
    SI F   cast  (U32 v) { return      __builtin_convertvector((I32)v,   F); }
    SI F   cast64(U64 v) { return      __builtin_convertvector(     v,   F); }
    SI U32 trunc_(F   v) { return (U32)__builtin_convertvector(     v, I32); }
    SI U32 expand(U16 v) { return      __builtin_convertvector(     v, U32); }
    SI U32 expand(U8  v) { return      __builtin_convertvector(     v, U32); }
#endif

template <typename V>
SI V if_then_else(I32 c, V t, V e) {
    return bit_cast<V>(if_then_else(c, bit_cast<F>(t), bit_cast<F>(e)));
}

SI U16 bswap(U16 x) {
#if defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
    // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
    // when generating code for SSE2 and SSE4.1.  We'll do it manually...
    auto v = widen_cast<__m128i>(x);
    v = _mm_slli_epi16(v,8) | _mm_srli_epi16(v,8);
    return sk_unaligned_load<U16>(&v);
#else
    return (x<<8) | (x>>8);
#endif
}

SI F fract(F v) { return v - floor_(v); }

// See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html.
SI F approx_log2(F x) {
    // e - 127 is a fair approximation of log2(x) in its own right...
    F e = cast(bit_cast<U32>(x)) * (1.0f / (1<<23));

    // ... but using the mantissa to refine its error is _much_ better.
    F m = bit_cast<F>((bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
    return e
         - 124.225514990f
         -   1.498030302f * m
         -   1.725879990f / (0.3520887068f + m);
}
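
// A worked example (illustrative, not from the original file): for x = 2.0f the bits
// are 0x40000000, so e == 128 and m == 0.5f, giving
//   128 - 124.225514990 - 1.498030302*0.5 - 1.725879990/0.852089 ≈ 1.0,
// which matches log2(2) == 1 almost exactly.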

SI F approx_log(F x) {
    const float ln2 = 0.69314718f;
    return ln2 * approx_log2(x);
}

SI F approx_pow2(F x) {
    F f = fract(x);
    return bit_cast<F>(round(1.0f * (1<<23),
                             x + 121.274057500f
                               -   1.490129070f * f
                               +  27.728023300f / (4.84252568f - f)));
}
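
// Illustrative check: approx_pow2(1.0f) has f == 0, so the rounded value is roughly
// (1<<23) * (1 + 121.274057500 + 27.728023300/4.84252568) ≈ 128 * (1<<23) == 0x40000000,
// which bit_casts back to 2.0f, as expected for 2^1.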

SI F approx_exp(F x) {
    const float log2_e = 1.4426950408889634074f;
    return approx_pow2(log2_e * x);
}

SI F approx_powf(F x, F y) {
#if defined(SK_LEGACY_APPROX_POWF_SPECIALCASE)
    return if_then_else((x == 0)         , 0
#else
    return if_then_else((x == 0)|(x == 1), x
#endif
                                         , approx_pow2(approx_log2(x) * y));
}

SI F from_half(U16 h) {
#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
    && !defined(SK_BUILD_FOR_GOOGLE3)  // Temporary workaround for some Google3 builds.
    return vcvt_f32_f16(h);

#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
    return _mm256_cvtph_ps(h);

#else
    // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
    U32 sem = expand(h),
        s   = sem & 0x8000,
         em = sem ^ s;

    // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
    auto denorm = (I32)em < 0x0400;  // I32 comparison is often quicker, and always safe here.
    return if_then_else(denorm, F(0)
                              , bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
#endif
}
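
// A worked example for the portable path above (illustrative): the half 1.0 is 0x3C00,
// so s == 0 and em == 0x3C00; it is not denormal, and
//   (em<<13) + ((127-15)<<23) == 0x07800000 + 0x38000000 == 0x3F800000,
// which is exactly the bit pattern of 1.0f.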

SI U16 to_half(F f) {
#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
    && !defined(SK_BUILD_FOR_GOOGLE3)  // Temporary workaround for some Google3 builds.
    return vcvt_f16_f32(f);

#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
    return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);

#else
    // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
    U32 sem = bit_cast<U32>(f),
        s   = sem & 0x80000000,
         em = sem ^ s;

    // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
    auto denorm = (I32)em < 0x38800000;  // I32 comparison is often quicker, and always safe here.
    return pack(if_then_else(denorm, U32(0)
                                   , (s>>16) + (em>>13) - ((127-15)<<10)));
#endif
}

// Our fundamental vector depth is our pixel stride.
static const size_t N = sizeof(F) / sizeof(float);
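
// For reference (derived from the typedefs above): N == 8 on the AVX/HSW/AVX512 paths,
// N == 4 on NEON and SSE, and N == 1 in the portable scalar build.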

// We're finally going to get to what a Stage function looks like!
//    tail == 0 ~~> work on a full N pixels
//    tail != 0 ~~> work on only the first tail pixels
// tail is always < N.

// Any custom ABI to use for all (non-externally-facing) stage functions?
// Also decide here whether to use narrow (compromise) or wide (ideal) stages.
#if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON)
    // This lets us pass vectors more efficiently on 32-bit ARM.
    // We can still only pass 16 floats, so best as 4x {r,g,b,a}.
    #define ABI __attribute__((pcs("aapcs-vfp")))
    #define JUMPER_NARROW_STAGES 1
#elif 0 && defined(_MSC_VER) && defined(__clang__) && defined(__x86_64__)
    // SysV ABI makes it very sensible to use wide stages with clang-cl.
    // TODO: crashes during compilation  :(
    #define ABI __attribute__((sysv_abi))
    #define JUMPER_NARROW_STAGES 0
#elif defined(_MSC_VER)
    // Even if not vectorized, this lets us pass {r,g,b,a} as registers,
    // instead of {b,a} on the stack.  Narrow stages work best for __vectorcall.
    #define ABI __vectorcall
    #define JUMPER_NARROW_STAGES 1
#elif defined(__x86_64__) || defined(SK_CPU_ARM64)
    // These platforms are ideal for wider stages, and their default ABI is ideal.
    #define ABI
    #define JUMPER_NARROW_STAGES 0
#else
    // 32-bit or unknown... shunt them down the narrow path.
    // Odds are these have few registers and are better off there.
    #define ABI
    #define JUMPER_NARROW_STAGES 1
#endif

#if JUMPER_NARROW_STAGES
    struct Params {
        size_t dx, dy, tail;
        F dr,dg,db,da;
    };
    using Stage = void(ABI*)(Params*, void** program, F r, F g, F b, F a);
#else
    // We keep program the second argument, so that it's passed in rsi for load_and_inc().
    using Stage = void(ABI*)(size_t tail, void** program, size_t dx, size_t dy, F,F,F,F, F,F,F,F);
#endif


static void start_pipeline(size_t dx, size_t dy, size_t xlimit, size_t ylimit, void** program) {
    auto start = (Stage)load_and_inc(program);
    const size_t x0 = dx;
    for (; dy < ylimit; dy++) {
    #if JUMPER_NARROW_STAGES
        Params params = { x0,dy,0, 0,0,0,0 };
        while (params.dx + N <= xlimit) {
            start(&params,program, 0,0,0,0);
            params.dx += N;
        }
        if (size_t tail = xlimit - params.dx) {
            params.tail = tail;
            start(&params,program, 0,0,0,0);
        }
    #else
        dx = x0;
        while (dx + N <= xlimit) {
            start(0,program,dx,dy,    0,0,0,0, 0,0,0,0);
            dx += N;
        }
        if (size_t tail = xlimit - dx) {
            start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
        }
    #endif
    }
}
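
// Illustrative call (a sketch, assuming the caller has already assembled `program`):
//   start_pipeline(0,0, width,height, program);
// runs the whole pipeline over a width x height region, N pixels at a time per row.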

#if JUMPER_NARROW_STAGES
    #define STAGE(name, ...)                                                   \
        SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,       \
                         F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da);  \
        static void ABI name(Params* params, void** program,                   \
                             F r, F g, F b, F a) {                             \
            name##_k(Ctx{program},params->dx,params->dy,params->tail, r,g,b,a, \
                     params->dr, params->dg, params->db, params->da);          \
            auto next = (Stage)load_and_inc(program);                          \
            next(params,program, r,g,b,a);                                     \
        }                                                                      \
        SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,       \
                         F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
#else
    #define STAGE(name, ...)                                                   \
        SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,       \
                         F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da);  \
        static void ABI name(size_t tail, void** program, size_t dx, size_t dy,\
                             F r, F g, F b, F a, F dr, F dg, F db, F da) {     \
            name##_k(Ctx{program},dx,dy,tail, r,g,b,a, dr,dg,db,da);           \
            auto next = (Stage)load_and_inc(program);                          \
            next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da);                    \
        }                                                                      \
        SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail,       \
                         F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
#endif


// just_return() is a simple no-op stage that only exists to end the chain,
// returning back up to start_pipeline(), and from there to the caller.
#if JUMPER_NARROW_STAGES
    static void ABI just_return(Params*, void**, F,F,F,F) {}
#else
    static void ABI just_return(size_t, void**, size_t,size_t, F,F,F,F, F,F,F,F) {}
#endif


// We could start defining normal Stages now.  But first, some helper functions.

// These load() and store() methods are tail-aware,
// but focus mainly on keeping the at-stride tail==0 case fast.

template <typename V, typename T>
SI V load(const T* src, size_t tail) {
#if !defined(JUMPER_IS_SCALAR)
    __builtin_assume(tail < N);
    if (__builtin_expect(tail, 0)) {
        V v{};  // Any inactive lanes are zeroed.
        switch (tail) {
            case 7: v[6] = src[6];
            case 6: v[5] = src[5];
            case 5: v[4] = src[4];
            case 4: memcpy(&v, src, 4*sizeof(T)); break;
            case 3: v[2] = src[2];
            case 2: memcpy(&v, src, 2*sizeof(T)); break;
            case 1: memcpy(&v, src, 1*sizeof(T)); break;
        }
        return v;
    }
#endif
    return sk_unaligned_load<V>(src);
}

template <typename V, typename T>
SI void store(T* dst, V v, size_t tail) {
#if !defined(JUMPER_IS_SCALAR)
    __builtin_assume(tail < N);
    if (__builtin_expect(tail, 0)) {
        switch (tail) {
            case 7: dst[6] = v[6];
            case 6: dst[5] = v[5];
            case 5: dst[4] = v[4];
            case 4: memcpy(dst, &v, 4*sizeof(T)); break;
            case 3: dst[2] = v[2];
            case 2: memcpy(dst, &v, 2*sizeof(T)); break;
            case 1: memcpy(dst, &v, 1*sizeof(T)); break;
        }
        return;
    }
#endif
    sk_unaligned_store(dst, v);
}
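
// For example (illustrative), with N == 8 and tail == 3, load() falls through case 3
// into case 2: lane 2 is copied element-wise, lanes 0-1 with one memcpy, and lanes 3-7
// stay zero; store() mirrors the same pattern on the way out.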

SI F from_byte(U8 b) {
    return cast(expand(b)) * (1/255.0f);
}
SI F from_short(U16 s) {
    return cast(expand(s)) * (1/65535.0f);
}
SI void from_565(U16 _565, F* r, F* g, F* b) {
    U32 wide = expand(_565);
    *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
    *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
    *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
}
SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) {
    U32 wide = expand(_4444);
    *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
    *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
    *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
    *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
}
SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
    *r = cast((_8888      ) & 0xff) * (1/255.0f);
    *g = cast((_8888 >>  8) & 0xff) * (1/255.0f);
    *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
    *a = cast((_8888 >> 24)       ) * (1/255.0f);
}
SI void from_88(U16 _88, F* r, F* g) {
    U32 wide = expand(_88);
    *r = cast((wide     ) & 0xff) * (1/255.0f);
    *g = cast((wide >> 8) & 0xff) * (1/255.0f);
}
SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
    *r = cast((rgba      ) & 0x3ff) * (1/1023.0f);
    *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
    *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
    *a = cast((rgba >> 30)        ) * (1/   3.0f);
}
SI void from_1616(U32 _1616, F* r, F* g) {
    *r = cast((_1616      ) & 0xffff) * (1/65535.0f);
    *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
}
SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) {
    *r = cast64((_16161616      ) & 0xffff) * (1/65535.0f);
    *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
    *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
    *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
}

// Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory.
template <typename T>
SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
    return (T*)ctx->pixels + dy*ctx->stride + dx;
}

// clamp v to [0,limit).
SI F clamp(F v, F limit) {
    F inclusive = bit_cast<F>( bit_cast<U32>(limit) - 1 );  // Exclusive -> inclusive.
    return min(max(0, v), inclusive);
}
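
// e.g. (illustrative) with limit == 8.0f, `inclusive` is the largest float below 8.0
// (about 7.9999995f), so a clamped coordinate can never index one past the end.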

// Used by gather_ stages to calculate the base pointer and a vector of indices to load.
template <typename T>
SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
    x = clamp(x, ctx->width);
    y = clamp(y, ctx->height);

    *ptr = (const T*)ctx->pixels;
    return trunc_(y)*ctx->stride + trunc_(x);
}

// We often have a nominally [0,1] float value we need to scale and convert to an integer,
// whether for a table lookup or to pack back down into bytes for storage.
//
// In practice, especially when dealing with interesting color spaces, that notionally
// [0,1] float may be out of [0,1] range.  Unorms cannot represent that, so we must clamp.
//
// You can adjust the expected input to [0,bias] by tweaking that parameter.
SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
    // TODO: platform-specific implementations to to_unorm(), removing round() entirely?
    // Any time we use round() we probably want to use to_unorm().
    return round(min(max(0, v), bias), scale);
}
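
// e.g. (illustrative) to_unorm(0.5f, 255) clamps 0.5 to [0,1] and rounds 0.5*255,
// giving 128, the byte typically stored for a mid-gray channel.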

SI I32 cond_to_mask(I32 cond) { return if_then_else(cond, I32(~0), I32(0)); }

// Now finally, normal Stages!

STAGE(seed_shader, Ctx::None) {
    static const float iota[] = {
        0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
        8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
    };
    // It's important for speed to explicitly cast(dx) and cast(dy),
    // which has the effect of splatting them to vectors before converting to floats.
    // On Intel this breaks a data dependency on previous loop iterations' registers.
    r = cast(dx) + sk_unaligned_load<F>(iota);
    g = cast(dy) + 0.5f;
    b = 1.0f;
    a = 0;
    dr = dg = db = da = 0;
}

STAGE(dither, const float* rate) {
    // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
    uint32_t iota[] = {0,1,2,3,4,5,6,7};
    U32 X = dx + sk_unaligned_load<U32>(iota),
        Y = dy;

    // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
    // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].

    // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
    Y ^= X;

    // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
    // for 2^6 == 64 == 8x8 matrix values.  If X=abc and Y=def, we make fcebda.
    U32 M = (Y & 1) << 5 | (X & 1) << 4
          | (Y & 2) << 2 | (X & 2) << 1
          | (Y & 4) >> 1 | (X & 4) >> 2;
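
    // Sanity check (illustrative): at pixel (X,Y) == (1,0), Y ^= X leaves Y == 1, so
    // M == (1<<5) | (1<<4) == 48, matching the 48 in the matrix's first row above.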
1306
Mike Kleindb711c92017-05-03 17:57:48 -04001307 // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
1308 // We want to make sure our dither is less than 0.5 in either direction to keep exact values
1309 // like 0 and 1 unchanged after rounding.
1310 F dither = cast(M) * (2/128.0f) - (63/128.0f);
Mike Klein581e6982017-05-03 13:05:13 -04001311
Mike Kleinf7729c22017-09-27 11:42:30 -04001312 r += *rate*dither;
1313 g += *rate*dither;
1314 b += *rate*dither;
Mike Klein7e68bc92017-05-16 12:03:15 -04001315
1316 r = max(0, min(r, a));
1317 g = max(0, min(g, a));
1318 b = max(0, min(b, a));
Mike Klein581e6982017-05-03 13:05:13 -04001319}
1320
Mike Reed9959f722017-05-15 09:34:22 -04001321// load 4 floats from memory, and splat them into r,g,b,a
Mike Kleinb11ab572018-10-24 06:42:14 -04001322STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
Mike Klein1a2e3e12017-08-03 11:24:13 -04001323 r = c->r;
1324 g = c->g;
1325 b = c->b;
1326 a = c->a;
Mike Kleine1caee12017-02-15 13:31:12 -05001327}
Mike Kleinb11ab572018-10-24 06:42:14 -04001328STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
Mike Kleincd3e13a2018-07-10 15:52:06 +00001329 r = c->r;
1330 g = c->g;
1331 b = c->b;
1332 a = c->a;
1333}
Mike Reed9318a6c2019-08-16 16:16:25 -04001334// load 4 floats from memory, and splat them into dr,dg,db,da
1335STAGE(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
1336 dr = c->r;
1337 dg = c->g;
1338 db = c->b;
1339 da = c->a;
1340}
Mike Kleine1caee12017-02-15 13:31:12 -05001341
Mike Reedc91e3872017-07-05 14:12:37 -04001342// splats opaque-black into r,g,b,a
Mike Kleinf7729c22017-09-27 11:42:30 -04001343STAGE(black_color, Ctx::None) {
Mike Reedc91e3872017-07-05 14:12:37 -04001344 r = g = b = 0.0f;
1345 a = 1.0f;
1346}
1347
Mike Kleinf7729c22017-09-27 11:42:30 -04001348STAGE(white_color, Ctx::None) {
Mike Reedc91e3872017-07-05 14:12:37 -04001349 r = g = b = a = 1.0f;
1350}
1351
Mike Reed9959f722017-05-15 09:34:22 -04001352// load registers r,g,b,a from context (mirrors store_src)
Mike Reed5e398c22019-03-08 11:50:35 -05001353STAGE(load_src, const float* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05001354 r = sk_unaligned_load<F>(ptr + 0*N);
1355 g = sk_unaligned_load<F>(ptr + 1*N);
1356 b = sk_unaligned_load<F>(ptr + 2*N);
1357 a = sk_unaligned_load<F>(ptr + 3*N);
Mike Reed9959f722017-05-15 09:34:22 -04001358}
1359
1360// store registers r,g,b,a into context (mirrors load_src)
Mike Reed5e398c22019-03-08 11:50:35 -05001361STAGE(store_src, float* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05001362 sk_unaligned_store(ptr + 0*N, r);
1363 sk_unaligned_store(ptr + 1*N, g);
1364 sk_unaligned_store(ptr + 2*N, b);
1365 sk_unaligned_store(ptr + 3*N, a);
Mike Reed9959f722017-05-15 09:34:22 -04001366}
1367
Mike Reed5e398c22019-03-08 11:50:35 -05001368// load registers dr,dg,db,da from context (mirrors store_dst)
1369STAGE(load_dst, const float* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05001370 dr = sk_unaligned_load<F>(ptr + 0*N);
1371 dg = sk_unaligned_load<F>(ptr + 1*N);
1372 db = sk_unaligned_load<F>(ptr + 2*N);
1373 da = sk_unaligned_load<F>(ptr + 3*N);
Mike Reed5e398c22019-03-08 11:50:35 -05001374}
1375
1376// store registers dr,dg,db,da into context (mirrors load_dst)
1377STAGE(store_dst, float* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05001378 sk_unaligned_store(ptr + 0*N, dr);
1379 sk_unaligned_store(ptr + 1*N, dg);
1380 sk_unaligned_store(ptr + 2*N, db);
1381 sk_unaligned_store(ptr + 3*N, da);
Mike Reed5e398c22019-03-08 11:50:35 -05001382}
1383
Mike Kleinb9c4a6f2017-04-03 13:54:55 -04001384// Most blend modes apply the same logic to each channel.
Mike Kleinaaca1e42017-03-31 09:29:01 -04001385#define BLEND_MODE(name) \
1386 SI F name##_channel(F s, F d, F sa, F da); \
Mike Kleinf7729c22017-09-27 11:42:30 -04001387 STAGE(name, Ctx::None) { \
Mike Kleinaaca1e42017-03-31 09:29:01 -04001388 r = name##_channel(r,dr,a,da); \
1389 g = name##_channel(g,dg,a,da); \
1390 b = name##_channel(b,db,a,da); \
1391 a = name##_channel(a,da,a,da); \
1392 } \
1393 SI F name##_channel(F s, F d, F sa, F da)
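// As a rough sketch (for illustration), BLEND_MODE(srcover) below expands to
//
//     SI F srcover_channel(F s, F d, F sa, F da);
//     STAGE(srcover, Ctx::None) {
//         r = srcover_channel(r,dr,a,da);
//         g = srcover_channel(g,dg,a,da);
//         b = srcover_channel(b,db,a,da);
//         a = srcover_channel(a,da,a,da);
//     }
//     SI F srcover_channel(F s, F d, F sa, F da) { return mad(d, inv(sa), s); }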
Mike Kleine1caee12017-02-15 13:31:12 -05001394
Mike Kleinfe560a82017-05-01 12:56:35 -04001395SI F inv(F x) { return 1.0f - x; }
Mike Klein66b09ab2017-03-31 10:29:40 -04001396SI F two(F x) { return x + x; }
Yuqian Li7741c752017-12-11 14:17:47 -05001397
Mike Kleine1caee12017-02-15 13:31:12 -05001398
Mike Kleinaaca1e42017-03-31 09:29:01 -04001399BLEND_MODE(clear) { return 0; }
1400BLEND_MODE(srcatop) { return s*da + d*inv(sa); }
1401BLEND_MODE(dstatop) { return d*sa + s*inv(da); }
1402BLEND_MODE(srcin) { return s * da; }
1403BLEND_MODE(dstin) { return d * sa; }
1404BLEND_MODE(srcout) { return s * inv(da); }
1405BLEND_MODE(dstout) { return d * inv(sa); }
1406BLEND_MODE(srcover) { return mad(d, inv(sa), s); }
1407BLEND_MODE(dstover) { return mad(s, inv(da), d); }
1408
1409BLEND_MODE(modulate) { return s*d; }
1410BLEND_MODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
Mike Kleinb90c0802019-03-15 14:03:41 +00001411BLEND_MODE(plus_) { return min(s + d, 1.0f); } // We can clamp to either 1 or sa.
Mike Kleinaaca1e42017-03-31 09:29:01 -04001412BLEND_MODE(screen) { return s + d - s*d; }
1413BLEND_MODE(xor_) { return s*inv(da) + d*inv(sa); }
Mike Klein66b09ab2017-03-31 10:29:40 -04001414#undef BLEND_MODE
Mike Kleinb9c4a6f2017-04-03 13:54:55 -04001415
1416// Most other blend modes apply the same logic to colors, and srcover to alpha.
Mike Klein66b09ab2017-03-31 10:29:40 -04001417#define BLEND_MODE(name) \
1418 SI F name##_channel(F s, F d, F sa, F da); \
Mike Kleinf7729c22017-09-27 11:42:30 -04001419 STAGE(name, Ctx::None) { \
Mike Klein66b09ab2017-03-31 10:29:40 -04001420 r = name##_channel(r,dr,a,da); \
1421 g = name##_channel(g,dg,a,da); \
1422 b = name##_channel(b,db,a,da); \
1423 a = mad(da, inv(a), a); \
1424 } \
1425 SI F name##_channel(F s, F d, F sa, F da)
1426
1427BLEND_MODE(darken) { return s + d - max(s*da, d*sa) ; }
1428BLEND_MODE(lighten) { return s + d - min(s*da, d*sa) ; }
1429BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); }
1430BLEND_MODE(exclusion) { return s + d - two(s*d); }
1431
Mike Klein61b84162017-03-31 11:48:14 -04001432BLEND_MODE(colorburn) {
Florin Malita59a62ed2017-08-23 12:08:37 -04001433 return if_then_else(d == da, d + s*inv(da),
1434 if_then_else(s == 0, /* s + */ d*inv(sa),
1435 sa*(da - min(da, (da-d)*sa*rcp(s))) + s*inv(da) + d*inv(sa)));
Mike Klein61b84162017-03-31 11:48:14 -04001436}
1437BLEND_MODE(colordodge) {
Florin Malita59a62ed2017-08-23 12:08:37 -04001438 return if_then_else(d == 0, /* d + */ s*inv(da),
1439 if_then_else(s == sa, s + d*inv(sa),
1440 sa*min(da, (d*sa)*rcp(sa - s)) + s*inv(da) + d*inv(sa)));
Mike Klein61b84162017-03-31 11:48:14 -04001441}
1442BLEND_MODE(hardlight) {
1443 return s*inv(da) + d*inv(sa)
1444 + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
1445}
1446BLEND_MODE(overlay) {
1447 return s*inv(da) + d*inv(sa)
1448 + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
1449}
1450
1451BLEND_MODE(softlight) {
1452 F m = if_then_else(da > 0, d / da, 0),
1453 s2 = two(s),
1454 m4 = two(two(m));
1455
1456 // The logic forks three ways:
1457 // 1. dark src?
1458 // 2. light src, dark dst?
1459 // 3. light src, light dst?
Mike Kleinfe560a82017-05-01 12:56:35 -04001460 F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
1461 darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
1462 liteDst = rcp(rsqrt(m)) - m, // Used in case 3.
Mike Klein61b84162017-03-31 11:48:14 -04001463 liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
1464 return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
1465}
Mike Kleinb9c4a6f2017-04-03 13:54:55 -04001466#undef BLEND_MODE
Mike Klein61b84162017-03-31 11:48:14 -04001467
Mike Kleinbb338332017-05-04 12:42:52 -04001468// We're basing our implementation of non-separable blend modes on
1469// https://www.w3.org/TR/compositing-1/#blendingnonseparable.
1470// and
1471// https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf
1472// They're equivalent, but the ES spec's math is more thoroughly simplified.
Mike Klein08aa88d2017-05-12 12:59:24 -04001473//
1474// Anything extra we add beyond that is to make the math work with premul inputs.
Mike Kleinbb338332017-05-04 12:42:52 -04001475
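// Quick sanity checks on the helpers below (illustrative, not part of the pipeline):
//     lum(1,1,1) == 0.30 + 0.59 + 0.11 == 1.0, so luminance is normalized;
//     sat(0.2, 0.5, 0.9) == 0.9 - 0.2 == 0.7, the max-min channel spread.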
Mike Klein5d835d02019-10-16 13:28:55 -05001476SI F sat(F r, F g, F b) { return max(r, max(g,b)) - min(r, min(g,b)); }
Mike Kleinbb338332017-05-04 12:42:52 -04001477SI F lum(F r, F g, F b) { return r*0.30f + g*0.59f + b*0.11f; }
1478
1479SI void set_sat(F* r, F* g, F* b, F s) {
Mike Klein5d835d02019-10-16 13:28:55 -05001480 F mn = min(*r, min(*g,*b)),
1481 mx = max(*r, max(*g,*b)),
Mike Kleinbb338332017-05-04 12:42:52 -04001482 sat = mx - mn;
1483
1484 // Map min channel to 0, max channel to s, and scale the middle proportionally.
1485 auto scale = [=](F c) {
1486 return if_then_else(sat == 0, 0, (c - mn) * s / sat);
1487 };
1488 *r = scale(*r);
1489 *g = scale(*g);
1490 *b = scale(*b);
1491}
Mike Klein08aa88d2017-05-12 12:59:24 -04001492SI void set_lum(F* r, F* g, F* b, F l) {
1493 F diff = l - lum(*r, *g, *b);
1494 *r += diff;
1495 *g += diff;
1496 *b += diff;
1497}
1498SI void clip_color(F* r, F* g, F* b, F a) {
Mike Klein5d835d02019-10-16 13:28:55 -05001499 F mn = min(*r, min(*g, *b)),
1500 mx = max(*r, max(*g, *b)),
Mike Kleinbb338332017-05-04 12:42:52 -04001501 l = lum(*r, *g, *b);
1502
1503 auto clip = [=](F c) {
1504 c = if_then_else(mn >= 0, c, l + (c - l) * ( l) / (l - mn) );
Mike Klein08aa88d2017-05-12 12:59:24 -04001505 c = if_then_else(mx > a, l + (c - l) * (a - l) / (mx - l), c);
Mike Kleinbb338332017-05-04 12:42:52 -04001506 c = max(c, 0); // Sometimes without this we may dip just a little negative.
1507 return c;
1508 };
1509 *r = clip(*r);
1510 *g = clip(*g);
1511 *b = clip(*b);
1512}
Mike Kleinbb338332017-05-04 12:42:52 -04001513
Mike Kleinf7729c22017-09-27 11:42:30 -04001514STAGE(hue, Ctx::None) {
Mike Klein08aa88d2017-05-12 12:59:24 -04001515 F R = r*a,
1516 G = g*a,
1517 B = b*a;
Mike Kleinbb338332017-05-04 12:42:52 -04001518
Mike Klein08aa88d2017-05-12 12:59:24 -04001519 set_sat(&R, &G, &B, sat(dr,dg,db)*a);
1520 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
1521 clip_color(&R,&G,&B, a*da);
Mike Kleinbb338332017-05-04 12:42:52 -04001522
Mike Klein08aa88d2017-05-12 12:59:24 -04001523 r = r*inv(da) + dr*inv(a) + R;
1524 g = g*inv(da) + dg*inv(a) + G;
1525 b = b*inv(da) + db*inv(a) + B;
Mike Kleinbb338332017-05-04 12:42:52 -04001526 a = a + da - a*da;
Mike Kleinbb338332017-05-04 12:42:52 -04001527}
Mike Kleinf7729c22017-09-27 11:42:30 -04001528STAGE(saturation, Ctx::None) {
Mike Klein08aa88d2017-05-12 12:59:24 -04001529 F R = dr*a,
1530 G = dg*a,
1531 B = db*a;
Mike Kleinbb338332017-05-04 12:42:52 -04001532
Mike Klein08aa88d2017-05-12 12:59:24 -04001533 set_sat(&R, &G, &B, sat( r, g, b)*da);
1534 set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
1535 clip_color(&R,&G,&B, a*da);
Mike Kleinbb338332017-05-04 12:42:52 -04001536
Mike Klein08aa88d2017-05-12 12:59:24 -04001537 r = r*inv(da) + dr*inv(a) + R;
1538 g = g*inv(da) + dg*inv(a) + G;
1539 b = b*inv(da) + db*inv(a) + B;
Mike Kleinbb338332017-05-04 12:42:52 -04001540 a = a + da - a*da;
Mike Kleinbb338332017-05-04 12:42:52 -04001541}
Mike Kleinf7729c22017-09-27 11:42:30 -04001542STAGE(color, Ctx::None) {
Mike Klein08aa88d2017-05-12 12:59:24 -04001543 F R = r*da,
1544 G = g*da,
1545 B = b*da;
Mike Kleinbb338332017-05-04 12:42:52 -04001546
Mike Klein08aa88d2017-05-12 12:59:24 -04001547 set_lum(&R, &G, &B, lum(dr,dg,db)*a);
1548 clip_color(&R,&G,&B, a*da);
Mike Kleinbb338332017-05-04 12:42:52 -04001549
Mike Klein08aa88d2017-05-12 12:59:24 -04001550 r = r*inv(da) + dr*inv(a) + R;
1551 g = g*inv(da) + dg*inv(a) + G;
1552 b = b*inv(da) + db*inv(a) + B;
Mike Kleinbb338332017-05-04 12:42:52 -04001553 a = a + da - a*da;
Mike Kleinbb338332017-05-04 12:42:52 -04001554}
Mike Kleinf7729c22017-09-27 11:42:30 -04001555STAGE(luminosity, Ctx::None) {
Mike Klein08aa88d2017-05-12 12:59:24 -04001556 F R = dr*a,
1557 G = dg*a,
1558 B = db*a;
Mike Kleinbb338332017-05-04 12:42:52 -04001559
Mike Klein08aa88d2017-05-12 12:59:24 -04001560 set_lum(&R, &G, &B, lum(r,g,b)*da);
1561 clip_color(&R,&G,&B, a*da);
Mike Kleinbb338332017-05-04 12:42:52 -04001562
Mike Klein08aa88d2017-05-12 12:59:24 -04001563 r = r*inv(da) + dr*inv(a) + R;
1564 g = g*inv(da) + dg*inv(a) + G;
1565 b = b*inv(da) + db*inv(a) + B;
Mike Kleinbb338332017-05-04 12:42:52 -04001566 a = a + da - a*da;
Mike Kleinbb338332017-05-04 12:42:52 -04001567}
1568
Mike Kleinb11ab572018-10-24 06:42:14 -04001569STAGE(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001570 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
Mike Klein50626262017-05-25 13:06:57 -04001571
1572 U32 dst = load<U32>(ptr, tail);
1573 dr = cast((dst ) & 0xff);
1574 dg = cast((dst >> 8) & 0xff);
1575 db = cast((dst >> 16) & 0xff);
1576 da = cast((dst >> 24) );
1577 // {dr,dg,db,da} are in [0,255]
Mike Klein37155d42017-12-15 09:55:03 -05001578 // { r, g, b, a} are in [0, 1] (but may be out of gamut)
Mike Klein50626262017-05-25 13:06:57 -04001579
1580 r = mad(dr, inv(a), r*255.0f);
1581 g = mad(dg, inv(a), g*255.0f);
1582 b = mad(db, inv(a), b*255.0f);
1583 a = mad(da, inv(a), a*255.0f);
Mike Klein37155d42017-12-15 09:55:03 -05001584 // { r, g, b, a} are now in [0,255] (but may be out of gamut)
Mike Klein50626262017-05-25 13:06:57 -04001585
Mike Klein37155d42017-12-15 09:55:03 -05001586 // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
1587 dst = to_unorm(r, 1, 255)
1588 | to_unorm(g, 1, 255) << 8
1589 | to_unorm(b, 1, 255) << 16
1590 | to_unorm(a, 1, 255) << 24;
Mike Klein50626262017-05-25 13:06:57 -04001591 store(ptr, dst, tail);
1592}
1593
Mike Kleinf7729c22017-09-27 11:42:30 -04001594STAGE(clamp_0, Ctx::None) {
Mike Kleine1caee12017-02-15 13:31:12 -05001595 r = max(r, 0);
1596 g = max(g, 0);
1597 b = max(b, 0);
1598 a = max(a, 0);
1599}
1600
Mike Kleinf7729c22017-09-27 11:42:30 -04001601STAGE(clamp_1, Ctx::None) {
Mike Kleinfe560a82017-05-01 12:56:35 -04001602 r = min(r, 1.0f);
1603 g = min(g, 1.0f);
1604 b = min(b, 1.0f);
1605 a = min(a, 1.0f);
Mike Kleine1caee12017-02-15 13:31:12 -05001606}
1607
Mike Kleinf7729c22017-09-27 11:42:30 -04001608STAGE(clamp_a, Ctx::None) {
Mike Kleinfe560a82017-05-01 12:56:35 -04001609 a = min(a, 1.0f);
Mike Kleine1caee12017-02-15 13:31:12 -05001610 r = min(r, a);
1611 g = min(g, a);
1612 b = min(b, a);
1613}
1614
Mike Kleineb50f432018-09-07 11:08:53 -04001615STAGE(clamp_gamut, Ctx::None) {
1616 // If you're using this stage, a should already be in [0,1].
1617 r = min(max(r, 0), a);
1618 g = min(max(g, 0), a);
1619 b = min(max(b, 0), a);
1620}
1621
Mike Kleinf7729c22017-09-27 11:42:30 -04001622STAGE(set_rgb, const float* rgb) {
Mike Kleind9e82252017-02-22 14:17:32 -05001623 r = rgb[0];
1624 g = rgb[1];
1625 b = rgb[2];
1626}
Mike Kleinbe569492018-09-14 09:34:21 -04001627STAGE(unbounded_set_rgb, const float* rgb) {
1628 r = rgb[0];
1629 g = rgb[1];
1630 b = rgb[2];
1631}
Mike Klein1a3eb522018-10-18 10:11:00 -04001632
Mike Kleinf7729c22017-09-27 11:42:30 -04001633STAGE(swap_rb, Ctx::None) {
Mike Kleind9e82252017-02-22 14:17:32 -05001634 auto tmp = r;
1635 r = b;
1636 b = tmp;
1637}
Mike Klein1a3eb522018-10-18 10:11:00 -04001638STAGE(swap_rb_dst, Ctx::None) {
1639 auto tmp = dr;
1640 dr = db;
1641 db = tmp;
1642}
Mike Kleind9e82252017-02-22 14:17:32 -05001643
Mike Kleinf7729c22017-09-27 11:42:30 -04001644STAGE(move_src_dst, Ctx::None) {
Mike Kleine1caee12017-02-15 13:31:12 -05001645 dr = r;
1646 dg = g;
1647 db = b;
1648 da = a;
1649}
Mike Kleinf7729c22017-09-27 11:42:30 -04001650STAGE(move_dst_src, Ctx::None) {
Mike Kleine1caee12017-02-15 13:31:12 -05001651 r = dr;
1652 g = dg;
1653 b = db;
1654 a = da;
1655}
1656
Mike Kleinf7729c22017-09-27 11:42:30 -04001657STAGE(premul, Ctx::None) {
Mike Kleine1caee12017-02-15 13:31:12 -05001658 r = r * a;
1659 g = g * a;
1660 b = b * a;
1661}
Mike Kleinf7729c22017-09-27 11:42:30 -04001662STAGE(premul_dst, Ctx::None) {
Mike Reed883c9bc2017-07-19 10:57:53 -04001663 dr = dr * da;
1664 dg = dg * da;
1665 db = db * da;
1666}
Mike Kleinf7729c22017-09-27 11:42:30 -04001667STAGE(unpremul, Ctx::None) {
Mike Kleina65f2f02017-10-11 13:05:24 -04001668 float inf = bit_cast<float>(0x7f800000);
1669 auto scale = if_then_else(1.0f/a < inf, 1.0f/a, 0);
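    // (When a == 0, 1.0f/a is +inf, the comparison fails, and scale falls back to 0,
    // so unpremultiplying transparent black yields 0 rather than inf or NaN.)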
Mike Klein08aa88d2017-05-12 12:59:24 -04001670 r *= scale;
1671 g *= scale;
1672 b *= scale;
Mike Kleine1caee12017-02-15 13:31:12 -05001673}
1674
Mike Kleinac568a92018-01-25 09:09:32 -05001675STAGE(force_opaque , Ctx::None) { a = 1; }
1676STAGE(force_opaque_dst, Ctx::None) { da = 1; }
1677
Florin Malitaa8392b72019-10-23 17:37:35 -04001678// Clamp x to [0,1], both sides inclusive (think, gradients).
1679// Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
1680SI F clamp_01(F v) { return min(max(0, v), 1); }
1681
Mike Kleinf7729c22017-09-27 11:42:30 -04001682STAGE(rgb_to_hsl, Ctx::None) {
Mike Klein5d835d02019-10-16 13:28:55 -05001683 F mx = max(r, max(g,b)),
1684 mn = min(r, min(g,b)),
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001685 d = mx - mn,
Mike Kleinfe560a82017-05-01 12:56:35 -04001686 d_rcp = 1.0f / d;
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001687
Mike Kleinfe560a82017-05-01 12:56:35 -04001688 F h = (1/6.0f) *
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001689 if_then_else(mx == mn, 0,
Mike Kleinfe560a82017-05-01 12:56:35 -04001690 if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, 6.0f, 0),
1691 if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
1692 (r-g)*d_rcp + 4.0f)));
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001693
Mike Kleinfe560a82017-05-01 12:56:35 -04001694 F l = (mx + mn) * 0.5f;
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001695 F s = if_then_else(mx == mn, 0,
Mike Kleinfe560a82017-05-01 12:56:35 -04001696 d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001697
1698 r = h;
1699 g = s;
1700 b = l;
1701}
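// Illustrative spot check: pure red (r,g,b) = (1,0,0) maps to (h,s,l) = (0, 1, 0.5).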
Mike Kleinf7729c22017-09-27 11:42:30 -04001702STAGE(hsl_to_rgb, Ctx::None) {
Florin Malitaa8392b72019-10-23 17:37:35 -04001703 // See GrRGBToHSLFilterEffect.fp
1704
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001705 F h = r,
1706 s = g,
Florin Malitaa8392b72019-10-23 17:37:35 -04001707 l = b,
1708 c = (1.0f - abs_(2.0f * l - 1)) * s;
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001709
Florin Malitaa8392b72019-10-23 17:37:35 -04001710 auto hue_to_rgb = [&](F hue) {
1711 F q = clamp_01(abs_(fract(hue) * 6.0f - 3.0f) - 1.0f);
1712 return (q - 0.5f) * c + l;
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001713 };
1714
Florin Malitaa8392b72019-10-23 17:37:35 -04001715 r = hue_to_rgb(h + 0.0f/3.0f);
1716 g = hue_to_rgb(h + 2.0f/3.0f);
1717 b = hue_to_rgb(h + 1.0f/3.0f);
Mike Kleindb1cbcb2017-04-12 08:35:41 -04001718}
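// Illustrative spot check: (h,s,l) = (0, 1, 0.5) maps back to pure red (1,0,0),
// i.e. this stage round-trips the rgb_to_hsl example above.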
1719
Mike Kleinfb126fa2017-08-24 13:06:23 -04001720// Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
1721SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb) {
Mike Klein5d835d02019-10-16 13:28:55 -05001722 return if_then_else(a < da, min(cr, min(cg,cb))
1723 , max(cr, max(cg,cb)));
Mike Kleinfb126fa2017-08-24 13:06:23 -04001724}
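// For example (illustrative): with a = 0.2, da = 1.0 and rgb coverage (0.25, 0.5, 0.75),
// a < da so we use the min, 0.25; with a and da swapped we'd use the max, 0.75.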
1725
Mike Kleinf7729c22017-09-27 11:42:30 -04001726STAGE(scale_1_float, const float* c) {
1727 r = r * *c;
1728 g = g * *c;
1729 b = b * *c;
1730 a = a * *c;
Mike Kleine3d44212017-02-24 08:21:18 -05001731}
Mike Kleinb11ab572018-10-24 06:42:14 -04001732STAGE(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001733 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
Mike Kleine1caee12017-02-15 13:31:12 -05001734
Mike Kleinc31858b2017-03-01 13:07:40 -05001735 auto scales = load<U8>(ptr, tail);
Mike Klein40de6da2017-04-07 13:09:29 -04001736 auto c = from_byte(scales);
Mike Kleine1caee12017-02-15 13:31:12 -05001737
1738 r = r * c;
1739 g = g * c;
1740 b = b * c;
1741 a = a * c;
1742}
Mike Kleinb11ab572018-10-24 06:42:14 -04001743STAGE(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001744 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
Mike Kleinfb126fa2017-08-24 13:06:23 -04001745
1746 F cr,cg,cb;
1747 from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
1748
1749 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
1750
1751 r = r * cr;
1752 g = g * cg;
1753 b = b * cb;
1754 a = a * ca;
1755}
Mike Kleine3d44212017-02-24 08:21:18 -05001756
Mike Kleinb9c4a6f2017-04-03 13:54:55 -04001757SI F lerp(F from, F to, F t) {
1758 return mad(to-from, t, from);
1759}
1760
Mike Kleinf7729c22017-09-27 11:42:30 -04001761STAGE(lerp_1_float, const float* c) {
1762 r = lerp(dr, r, *c);
1763 g = lerp(dg, g, *c);
1764 b = lerp(db, b, *c);
1765 a = lerp(da, a, *c);
Mike Kleine3d44212017-02-24 08:21:18 -05001766}
Mike Reed79a75422019-03-15 15:45:09 -04001767STAGE(lerp_native, const float scales[]) {
Mike Klein7a177b42019-06-17 17:17:47 -05001768 auto c = sk_unaligned_load<F>(scales);
Mike Reed79a75422019-03-15 15:45:09 -04001769 r = lerp(dr, r, c);
1770 g = lerp(dg, g, c);
1771 b = lerp(db, b, c);
1772 a = lerp(da, a, c);
1773}
Mike Kleinb11ab572018-10-24 06:42:14 -04001774STAGE(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001775 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
Mike Klein2b767362017-02-22 13:52:40 -05001776
Mike Kleinc31858b2017-03-01 13:07:40 -05001777 auto scales = load<U8>(ptr, tail);
Mike Klein40de6da2017-04-07 13:09:29 -04001778 auto c = from_byte(scales);
Mike Klein2b767362017-02-22 13:52:40 -05001779
1780 r = lerp(dr, r, c);
1781 g = lerp(dg, g, c);
1782 b = lerp(db, b, c);
1783 a = lerp(da, a, c);
1784}
Mike Kleinb11ab572018-10-24 06:42:14 -04001785STAGE(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001786 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
Mike Kleine3d44212017-02-24 08:21:18 -05001787
1788 F cr,cg,cb;
Mike Klein5224f462017-03-07 17:29:54 -05001789 from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
Mike Kleine3d44212017-02-24 08:21:18 -05001790
Mike Kleinfb126fa2017-08-24 13:06:23 -04001791 F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
1792
Mike Kleine3d44212017-02-24 08:21:18 -05001793 r = lerp(dr, r, cr);
1794 g = lerp(dg, g, cg);
1795 b = lerp(db, b, cb);
Mike Kleinfb126fa2017-08-24 13:06:23 -04001796 a = lerp(da, a, ca);
Mike Kleine3d44212017-02-24 08:21:18 -05001797}
Mike Kleine1caee12017-02-15 13:31:12 -05001798
Mike Kleineda2ac22018-11-06 11:53:59 -05001799STAGE(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
1800 auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
1801 aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
1802
1803 F mul = from_byte(load<U8>(mptr, tail)),
1804 add = from_byte(load<U8>(aptr, tail));
1805
1806 r = mad(r, mul, add);
1807 g = mad(g, mul, add);
1808 b = mad(b, mul, add);
1809}
1810
Mike Kleinb11ab572018-10-24 06:42:14 -04001811STAGE(byte_tables, const void* ctx) { // TODO: rename Tables to SkRasterPipeline_ByteTablesCtx
Mike Klein40de6da2017-04-07 13:09:29 -04001812 struct Tables { const uint8_t *r, *g, *b, *a; };
1813 auto tables = (const Tables*)ctx;
1814
Mike Klein37155d42017-12-15 09:55:03 -05001815 r = from_byte(gather(tables->r, to_unorm(r, 255)));
1816 g = from_byte(gather(tables->g, to_unorm(g, 255)));
1817 b = from_byte(gather(tables->b, to_unorm(b, 255)));
1818 a = from_byte(gather(tables->a, to_unorm(a, 255)));
Mike Klein40de6da2017-04-07 13:09:29 -04001819}
1820
Mike Kleinb1c77e42018-09-06 15:23:29 -04001821SI F strip_sign(F x, U32* sign) {
1822 U32 bits = bit_cast<U32>(x);
1823 *sign = bits & 0x80000000;
1824 return bit_cast<F>(bits ^ *sign);
1825}
Mike Kleinc4e40632018-09-05 15:16:52 -04001826
Mike Kleinb1c77e42018-09-06 15:23:29 -04001827SI F apply_sign(F x, U32 sign) {
1828 return bit_cast<F>(sign | bit_cast<U32>(x));
1829}
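// Illustrative round trip: strip_sign(F(-0.75f), &sign) returns +0.75 with
// sign == 0x80000000, and apply_sign(+0.75, sign) restores -0.75.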
Mike Kleinc4e40632018-09-05 15:16:52 -04001830
Brian Osman5deadca2019-01-24 12:18:17 -05001831STAGE(parametric, const skcms_TransferFunction* ctx) {
Mike Klein4eebd9e2018-07-11 14:49:51 -04001832 auto fn = [&](F v) {
Mike Kleinc4e40632018-09-05 15:16:52 -04001833 U32 sign;
1834 v = strip_sign(v, &sign);
1835
Brian Osman5deadca2019-01-24 12:18:17 -05001836 F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
1837 , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
Mike Klein33d3d312018-09-05 17:52:25 -04001838 return apply_sign(r, sign);
Mike Klein4eebd9e2018-07-11 14:49:51 -04001839 };
1840 r = fn(r);
1841 g = fn(g);
1842 b = fn(b);
Mike Klein44375172017-04-17 19:32:05 -04001843}
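// For reference (a typical set of parameters, assumed here for illustration only):
// the sRGB transfer function fits this 7-parameter form with roughly
//     g = 2.4, a = 1/1.055, b = 0.055/1.055, c = 1/12.92, d = 0.04045, e = f = 0,
// so v <= d takes the linear mad(c,v,f) branch and larger v takes (a*v+b)^g + e.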
Mike Klein44375172017-04-17 19:32:05 -04001844
Mike Klein1ce03a62019-04-23 08:00:35 -05001845STAGE(gamma_, const float* G) {
Mike Kleinc4e40632018-09-05 15:16:52 -04001846 auto fn = [&](F v) {
1847 U32 sign;
1848 v = strip_sign(v, &sign);
1849 return apply_sign(approx_powf(v, *G), sign);
1850 };
1851 r = fn(r);
1852 g = fn(g);
1853 b = fn(b);
1854}
1855
Brian Osman11e6aa82019-10-16 13:58:42 -04001856STAGE(PQish, const skcms_TransferFunction* ctx) {
1857 auto fn = [&](F v) {
1858 U32 sign;
1859 v = strip_sign(v, &sign);
1860
1861 F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0)
1862 / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
1863 ctx->f);
1864
1865 return apply_sign(r, sign);
1866 };
1867 r = fn(r);
1868 g = fn(g);
1869 b = fn(b);
1870}
1871
1872STAGE(HLGish, const skcms_TransferFunction* ctx) {
1873 auto fn = [&](F v) {
1874 U32 sign;
1875 v = strip_sign(v, &sign);
1876
1877 const float R = ctx->a, G = ctx->b,
1878 a = ctx->c, b = ctx->d, c = ctx->e;
1879
1880 F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
1881 , approx_exp((v-c)*a) + b);
1882
1883 return apply_sign(r, sign);
1884 };
1885 r = fn(r);
1886 g = fn(g);
1887 b = fn(b);
1888}
1889
1890STAGE(HLGinvish, const skcms_TransferFunction* ctx) {
1891 auto fn = [&](F v) {
1892 U32 sign;
1893 v = strip_sign(v, &sign);
1894
1895 const float R = ctx->a, G = ctx->b,
1896 a = ctx->c, b = ctx->d, c = ctx->e;
1897
1898 F r = if_then_else(v <= 1, R * approx_powf(v, G)
1899 , a * approx_log(v - b) + c);
1900
1901 return apply_sign(r, sign);
1902 };
1903 r = fn(r);
1904 g = fn(g);
1905 b = fn(b);
1906}
1907
Mike Kleinc4e40632018-09-05 15:16:52 -04001908STAGE(from_srgb, Ctx::None) {
1909 auto fn = [](F s) {
1910 U32 sign;
1911 s = strip_sign(s, &sign);
1912 auto lo = s * (1/12.92f);
1913 auto hi = mad(s*s, mad(s, 0.3000f, 0.6975f), 0.0025f);
1914 return apply_sign(if_then_else(s < 0.055f, lo, hi), sign);
1915 };
1916 r = fn(r);
1917 g = fn(g);
1918 b = fn(b);
1919}
1920STAGE(to_srgb, Ctx::None) {
1921 auto fn = [](F l) {
1922 U32 sign;
1923 l = strip_sign(l, &sign);
1924 // We tweak c and d for each instruction set to make sure fn(1) is exactly 1.
1925 #if defined(JUMPER_IS_AVX512)
1926 const float c = 1.130026340485f,
1927 d = 0.141387879848f;
1928 #elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || \
1929 defined(JUMPER_IS_AVX ) || defined(JUMPER_IS_HSW )
1930 const float c = 1.130048394203f,
1931 d = 0.141357362270f;
1932 #elif defined(JUMPER_IS_NEON)
1933 const float c = 1.129999995232f,
1934 d = 0.141381442547f;
1935 #else
1936 const float c = 1.129999995232f,
1937 d = 0.141377761960f;
1938 #endif
1939 F t = rsqrt(l);
1940 auto lo = l * 12.92f;
1941 auto hi = mad(t, mad(t, -0.0024542345f, 0.013832027f), c)
1942 * rcp(d + t);
1943 return apply_sign(if_then_else(l < 0.00465985f, lo, hi), sign);
1944 };
1945 r = fn(r);
1946 g = fn(g);
1947 b = fn(b);
Mike Kleina07e4302017-08-09 13:51:35 -04001948}
1949
Mike Kleinb11ab572018-10-24 06:42:14 -04001950STAGE(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001951 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
Mike Klein420e38f2017-02-24 09:05:14 -05001952
1953 r = g = b = 0.0f;
Mike Klein40de6da2017-04-07 13:09:29 -04001954 a = from_byte(load<U8>(ptr, tail));
Mike Klein420e38f2017-02-24 09:05:14 -05001955}
Mike Kleinb11ab572018-10-24 06:42:14 -04001956STAGE(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001957 auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
Mike Reed279091e2017-06-27 16:58:00 -04001958
1959 dr = dg = db = 0.0f;
1960 da = from_byte(load<U8>(ptr, tail));
1961}
Mike Kleinb11ab572018-10-24 06:42:14 -04001962STAGE(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein21bd3e42017-04-06 16:32:29 -04001963 const uint8_t* ptr;
1964 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1965 r = g = b = 0.0f;
Mike Klein40de6da2017-04-07 13:09:29 -04001966 a = from_byte(gather(ptr, ix));
Mike Klein21bd3e42017-04-06 16:32:29 -04001967}
Mike Kleinb11ab572018-10-24 06:42:14 -04001968STAGE(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001969 auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
Mike Klein420e38f2017-02-24 09:05:14 -05001970
Mike Klein37155d42017-12-15 09:55:03 -05001971 U8 packed = pack(pack(to_unorm(a, 255)));
Mike Kleinc31858b2017-03-01 13:07:40 -05001972 store(ptr, packed, tail);
Mike Klein420e38f2017-02-24 09:05:14 -05001973}
1974
Mike Kleinb11ab572018-10-24 06:42:14 -04001975STAGE(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001976 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
Mike Klein3f81f372017-02-23 13:03:57 -05001977
Mike Klein5224f462017-03-07 17:29:54 -05001978 from_565(load<U16>(ptr, tail), &r,&g,&b);
Mike Kleinfe560a82017-05-01 12:56:35 -04001979 a = 1.0f;
Mike Klein3f81f372017-02-23 13:03:57 -05001980}
Mike Kleinb11ab572018-10-24 06:42:14 -04001981STAGE(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001982 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
Mike Reed279091e2017-06-27 16:58:00 -04001983
1984 from_565(load<U16>(ptr, tail), &dr,&dg,&db);
1985 da = 1.0f;
1986}
Mike Kleinb11ab572018-10-24 06:42:14 -04001987STAGE(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein21bd3e42017-04-06 16:32:29 -04001988 const uint16_t* ptr;
1989 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
1990 from_565(gather(ptr, ix), &r,&g,&b);
Mike Kleinfe560a82017-05-01 12:56:35 -04001991 a = 1.0f;
Mike Klein21bd3e42017-04-06 16:32:29 -04001992}
Mike Kleinb11ab572018-10-24 06:42:14 -04001993STAGE(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04001994 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
Mike Klein3f81f372017-02-23 13:03:57 -05001995
Mike Klein37155d42017-12-15 09:55:03 -05001996 U16 px = pack( to_unorm(r, 31) << 11
1997 | to_unorm(g, 63) << 5
1998 | to_unorm(b, 31) );
Mike Kleinc31858b2017-03-01 13:07:40 -05001999 store(ptr, px, tail);
Mike Klein3f81f372017-02-23 13:03:57 -05002000}
2001
Mike Kleinb11ab572018-10-24 06:42:14 -04002002STAGE(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002003 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
Mike Kleinf809fef2017-03-31 13:52:45 -04002004 from_4444(load<U16>(ptr, tail), &r,&g,&b,&a);
2005}
Mike Kleinb11ab572018-10-24 06:42:14 -04002006STAGE(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002007 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
Mike Reed279091e2017-06-27 16:58:00 -04002008 from_4444(load<U16>(ptr, tail), &dr,&dg,&db,&da);
2009}
Mike Kleinb11ab572018-10-24 06:42:14 -04002010STAGE(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein21bd3e42017-04-06 16:32:29 -04002011 const uint16_t* ptr;
2012 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2013 from_4444(gather(ptr, ix), &r,&g,&b,&a);
2014}
Mike Kleinb11ab572018-10-24 06:42:14 -04002015STAGE(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002016 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
Mike Klein37155d42017-12-15 09:55:03 -05002017 U16 px = pack( to_unorm(r, 15) << 12
2018 | to_unorm(g, 15) << 8
2019 | to_unorm(b, 15) << 4
2020 | to_unorm(a, 15) );
Mike Kleinf809fef2017-03-31 13:52:45 -04002021 store(ptr, px, tail);
2022}
2023
Mike Kleinb11ab572018-10-24 06:42:14 -04002024STAGE(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002025 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
Mike Kleindec4ea82017-04-06 15:04:05 -04002026 from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
2027}
Mike Kleinb11ab572018-10-24 06:42:14 -04002028STAGE(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002029 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
Mike Reed279091e2017-06-27 16:58:00 -04002030 from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
2031}
Mike Kleinb11ab572018-10-24 06:42:14 -04002032STAGE(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
Mike Kleindec4ea82017-04-06 15:04:05 -04002033 const uint32_t* ptr;
2034 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2035 from_8888(gather(ptr, ix), &r,&g,&b,&a);
Mike Kleine1caee12017-02-15 13:31:12 -05002036}
Mike Kleinb11ab572018-10-24 06:42:14 -04002037STAGE(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002038 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
Mike Klein3b92b692017-07-18 11:30:25 -04002039
Mike Klein37155d42017-12-15 09:55:03 -05002040 U32 px = to_unorm(r, 255)
2041 | to_unorm(g, 255) << 8
2042 | to_unorm(b, 255) << 16
2043 | to_unorm(a, 255) << 24;
Mike Klein3b92b692017-07-18 11:30:25 -04002044 store(ptr, px, tail);
2045}
2046
Brian Salomon217522c2019-06-11 15:55:30 -04002047STAGE(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillipsd470e1b2019-09-04 15:05:35 -04002048 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2049 from_88(load<U16>(ptr, tail), &r, &g);
Brian Salomon217522c2019-06-11 15:55:30 -04002050 b = 0;
2051 a = 1;
Robert Phillipsd470e1b2019-09-04 15:05:35 -04002052}
2053STAGE(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2054 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2055 from_88(load<U16>(ptr, tail), &dr, &dg);
2056 db = 0;
2057 da = 1;
2058}
2059STAGE(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
2060 const uint16_t* ptr;
2061 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2062 from_88(gather(ptr, ix), &r, &g);
2063 b = 0;
2064 a = 1;
Brian Salomon217522c2019-06-11 15:55:30 -04002065}
2066STAGE(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillipsd470e1b2019-09-04 15:05:35 -04002067 auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
2068 U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) << 8 );
Brian Salomon217522c2019-06-11 15:55:30 -04002069 store(ptr, px, tail);
2070}
2071
2072STAGE(load_a16, const SkRasterPipeline_MemoryCtx* ctx) {
2073 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2074 r = g = b = 0;
2075 a = from_short(load<U16>(ptr, tail));
2076}
Robert Phillips429f0d32019-09-11 17:03:28 -04002077STAGE(load_a16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2078 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2079 dr = dg = db = 0.0f;
2080 da = from_short(load<U16>(ptr, tail));
2081}
2082STAGE(gather_a16, const SkRasterPipeline_GatherCtx* ctx) {
2083 const uint16_t* ptr;
2084 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2085 r = g = b = 0.0f;
2086 a = from_short(gather(ptr, ix));
2087}
Brian Salomon217522c2019-06-11 15:55:30 -04002088STAGE(store_a16, const SkRasterPipeline_MemoryCtx* ctx) {
2089 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2090
2091 U16 px = pack(to_unorm(a, 65535));
2092 store(ptr, px, tail);
2093}
Robert Phillips429f0d32019-09-11 17:03:28 -04002094
Brian Salomon217522c2019-06-11 15:55:30 -04002095STAGE(load_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002096 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
Brian Salomon217522c2019-06-11 15:55:30 -04002097 b = 0; a = 1;
2098 from_1616(load<U32>(ptr, tail), &r,&g);
2099}
Robert Phillips429f0d32019-09-11 17:03:28 -04002100STAGE(load_rg1616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2101 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2102 from_1616(load<U32>(ptr, tail), &dr, &dg);
2103 db = 0;
2104 da = 1;
2105}
2106STAGE(gather_rg1616, const SkRasterPipeline_GatherCtx* ctx) {
2107 const uint32_t* ptr;
2108 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2109 from_1616(gather(ptr, ix), &r, &g);
2110 b = 0;
2111 a = 1;
2112}
Brian Salomon217522c2019-06-11 15:55:30 -04002113STAGE(store_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
2114 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2115
2116 U32 px = to_unorm(r, 65535)
2117 | to_unorm(g, 65535) << 16;
2118 store(ptr, px, tail);
2119}
Robert Phillips429f0d32019-09-11 17:03:28 -04002120
Brian Salomond608e222019-06-12 17:42:58 -04002121STAGE(load_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002122 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
Brian Salomond608e222019-06-12 17:42:58 -04002123 from_16161616(load<U64>(ptr, tail), &r,&g, &b, &a);
2124}
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002125STAGE(load_16161616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2126 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
2127 from_16161616(load<U64>(ptr, tail), &dr, &dg, &db, &da);
2128}
2129STAGE(gather_16161616, const SkRasterPipeline_GatherCtx* ctx) {
2130 const uint64_t* ptr;
2131 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2132 from_16161616(gather(ptr, ix), &r, &g, &b, &a);
2133}
Brian Salomond608e222019-06-12 17:42:58 -04002134STAGE(store_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
2135 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
2136
2137 U16 R = pack(to_unorm(r, 65535)),
2138 G = pack(to_unorm(g, 65535)),
2139 B = pack(to_unorm(b, 65535)),
2140 A = pack(to_unorm(a, 65535));
2141
2142 store4(ptr,tail, R,G,B,A);
2143}
2144
Brian Salomon217522c2019-06-11 15:55:30 -04002145
Mike Kleinb11ab572018-10-24 06:42:14 -04002146STAGE(load_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinac568a92018-01-25 09:09:32 -05002147 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2148 from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
2149}
Mike Kleinb11ab572018-10-24 06:42:14 -04002150STAGE(load_1010102_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinac568a92018-01-25 09:09:32 -05002151 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
2152 from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
2153}
Mike Kleinb11ab572018-10-24 06:42:14 -04002154STAGE(gather_1010102, const SkRasterPipeline_GatherCtx* ctx) {
Mike Kleinac568a92018-01-25 09:09:32 -05002155 const uint32_t* ptr;
2156 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2157 from_1010102(gather(ptr, ix), &r,&g,&b,&a);
2158}
Mike Kleinb11ab572018-10-24 06:42:14 -04002159STAGE(store_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinac568a92018-01-25 09:09:32 -05002160 auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
2161
2162 U32 px = to_unorm(r, 1023)
2163 | to_unorm(g, 1023) << 10
2164 | to_unorm(b, 1023) << 20
2165 | to_unorm(a, 3) << 30;
2166 store(ptr, px, tail);
2167}
2168
Mike Kleinb11ab572018-10-24 06:42:14 -04002169STAGE(load_f16, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002170 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
Mike Kleine1caee12017-02-15 13:31:12 -05002171
Mike Klein114e6b32017-04-03 22:21:15 -04002172 U16 R,G,B,A;
Mike Kleinfa6eb912017-04-05 10:18:27 -04002173 load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
Mike Klein114e6b32017-04-03 22:21:15 -04002174 r = from_half(R);
2175 g = from_half(G);
2176 b = from_half(B);
2177 a = from_half(A);
Mike Kleine1caee12017-02-15 13:31:12 -05002178}
Mike Kleinb11ab572018-10-24 06:42:14 -04002179STAGE(load_f16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002180 auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
Mike Reed279091e2017-06-27 16:58:00 -04002181
2182 U16 R,G,B,A;
2183 load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
2184 dr = from_half(R);
2185 dg = from_half(G);
2186 db = from_half(B);
2187 da = from_half(A);
2188}
Mike Kleinb11ab572018-10-24 06:42:14 -04002189STAGE(gather_f16, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein5f055f02017-04-06 20:02:11 -04002190 const uint64_t* ptr;
2191 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2192 auto px = gather(ptr, ix);
2193
2194 U16 R,G,B,A;
2195 load4((const uint16_t*)&px,0, &R,&G,&B,&A);
2196 r = from_half(R);
2197 g = from_half(G);
2198 b = from_half(B);
2199 a = from_half(A);
2200}
Mike Kleinb11ab572018-10-24 06:42:14 -04002201STAGE(store_f16, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002202 auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
Mike Kleinfa6eb912017-04-05 10:18:27 -04002203 store4((uint16_t*)ptr,tail, to_half(r)
2204 , to_half(g)
2205 , to_half(b)
2206 , to_half(a));
Mike Kleine1caee12017-02-15 13:31:12 -05002207}
2208
Mike Kleinb11ab572018-10-24 06:42:14 -04002209STAGE(store_u16_be, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Kleinb4379132017-10-17 16:06:49 -04002210 auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,dy);
Mike Klein3146bb92017-04-05 14:45:02 -04002211
Mike Klein37155d42017-12-15 09:55:03 -05002212 U16 R = bswap(pack(to_unorm(r, 65535))),
2213 G = bswap(pack(to_unorm(g, 65535))),
2214 B = bswap(pack(to_unorm(b, 65535))),
2215 A = bswap(pack(to_unorm(a, 65535)));
Mike Klein3146bb92017-04-05 14:45:02 -04002216
Mike Kleinb3821732017-04-17 10:58:05 -04002217 store4(ptr,tail, R,G,B,A);
Mike Klein3146bb92017-04-05 14:45:02 -04002218}
2219
Brian Salomon217522c2019-06-11 15:55:30 -04002220STAGE(load_af16, const SkRasterPipeline_MemoryCtx* ctx) {
2221 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
2222
2223 U16 A = load<U16>((const uint16_t*)ptr, tail);
2224 r = 0;
2225 g = 0;
2226 b = 0;
2227 a = from_half(A);
2228}
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002229STAGE(load_af16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2230 auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
2231
2232 U16 A = load<U16>((const uint16_t*)ptr, tail);
2233 dr = dg = db = 0.0f;
2234 da = from_half(A);
2235}
2236STAGE(gather_af16, const SkRasterPipeline_GatherCtx* ctx) {
2237 const uint16_t* ptr;
2238 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2239 r = g = b = 0.0f;
2240 a = from_half(gather(ptr, ix));
2241}
Brian Salomon217522c2019-06-11 15:55:30 -04002242STAGE(store_af16, const SkRasterPipeline_MemoryCtx* ctx) {
2243 auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
2244 store(ptr, to_half(a), tail);
2245}
2246
2247STAGE(load_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002248 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
Brian Salomon217522c2019-06-11 15:55:30 -04002249
2250 U16 R,G;
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002251 load2((const uint16_t*)ptr, tail, &R, &G);
Brian Salomon217522c2019-06-11 15:55:30 -04002252 r = from_half(R);
2253 g = from_half(G);
2254 b = 0;
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002255 a = 1;
2256}
2257STAGE(load_rgf16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
2258 auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
2259
2260 U16 R,G;
2261 load2((const uint16_t*)ptr, tail, &R, &G);
2262 dr = from_half(R);
2263 dg = from_half(G);
2264 db = 0;
2265 da = 1;
2266}
2267STAGE(gather_rgf16, const SkRasterPipeline_GatherCtx* ctx) {
2268 const uint32_t* ptr;
2269 U32 ix = ix_and_ptr(&ptr, ctx, r, g);
2270 auto px = gather(ptr, ix);
2271
2272 U16 R,G;
2273 load2((const uint16_t*)&px, 0, &R, &G);
2274 r = from_half(R);
2275 g = from_half(G);
2276 b = 0;
2277 a = 1;
Brian Salomon217522c2019-06-11 15:55:30 -04002278}
2279STAGE(store_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillips17a3a0b2019-09-18 13:56:54 -04002280 auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
Brian Salomon217522c2019-06-11 15:55:30 -04002281 store2((uint16_t*)ptr, tail, to_half(r)
2282 , to_half(g));
2283}
2284
Mike Kleinb11ab572018-10-24 06:42:14 -04002285STAGE(load_f32, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein37854712018-06-26 11:43:06 -04002286 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
Mike Klein14987eb2017-04-06 10:22:26 -04002287 load4(ptr,tail, &r,&g,&b,&a);
2288}
Mike Kleinb11ab572018-10-24 06:42:14 -04002289STAGE(load_f32_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein37854712018-06-26 11:43:06 -04002290 auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
Mike Reed279091e2017-06-27 16:58:00 -04002291 load4(ptr,tail, &dr,&dg,&db,&da);
2292}
Mike Kleinb11ab572018-10-24 06:42:14 -04002293STAGE(gather_f32, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein37854712018-06-26 11:43:06 -04002294 const float* ptr;
2295 U32 ix = ix_and_ptr(&ptr, ctx, r,g);
2296 r = gather(ptr, 4*ix + 0);
2297 g = gather(ptr, 4*ix + 1);
2298 b = gather(ptr, 4*ix + 2);
2299 a = gather(ptr, 4*ix + 3);
2300}
Mike Kleinb11ab572018-10-24 06:42:14 -04002301STAGE(store_f32, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein37854712018-06-26 11:43:06 -04002302 auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
Mike Kleinfa6eb912017-04-05 10:18:27 -04002303 store4(ptr,tail, r,g,b,a);
Mike Klein94fc0fe2017-03-03 14:05:32 -05002304}
2305
Brian Salomon217522c2019-06-11 15:55:30 -04002306STAGE(load_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
2307 auto ptr = ptr_at_xy<const float>(ctx, 2*dx,2*dy);
2308 load2(ptr, tail, &r, &g);
2309 b = 0;
2310 a = 1;
2311}
2312STAGE(store_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
2313 auto ptr = ptr_at_xy<float>(ctx, 2*dx,2*dy);
2314 store2(ptr, tail, r, g);
2315}
2316
Mike Kleinb11ab572018-10-24 06:42:14 -04002317SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx* ctx) {
Mike Kleinf3b4e162017-09-22 15:32:59 -04002318 return v - floor_(v*ctx->invScale)*ctx->scale;
Mike Klein0cc60b82017-06-22 11:00:17 -07002319}
Mike Kleinb11ab572018-10-24 06:42:14 -04002320SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx* ctx) {
Mike Reed51e46d52017-06-23 14:21:25 -04002321 auto limit = ctx->scale;
2322 auto invLimit = ctx->invScale;
Mike Kleinf3b4e162017-09-22 15:32:59 -04002323 return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
Mike Klein0cc60b82017-06-22 11:00:17 -07002324}
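// Illustrative check with scale (limit) = 4, invScale = 1/4:
//     exclusive_repeat(5.5) = 5.5 - floor(5.5/4)*4              = 1.5
//     exclusive_mirror(5.5) = |(5.5-4) - 8*floor((5.5-4)/8) - 4| = 2.5
// i.e. 5.5 wraps to 1.5 when repeating and reflects to 2.5 when mirroring.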
Mike Kleinf3b4e162017-09-22 15:32:59 -04002325// Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images).
2326// The gather stages will hard clamp the output of these stages to [0,limit)...
2327// we just need to do the basic repeat or mirroring.
Mike Kleinb11ab572018-10-24 06:42:14 -04002328STAGE(repeat_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_repeat(r, ctx); }
2329STAGE(repeat_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_repeat(g, ctx); }
2330STAGE(mirror_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_mirror(r, ctx); }
2331STAGE(mirror_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_mirror(g, ctx); }
Mike Klein0cc60b82017-06-22 11:00:17 -07002332
Mike Kleina3b88952017-10-05 13:21:31 -04002333STAGE( clamp_x_1, Ctx::None) { r = clamp_01(r); }
2334STAGE(repeat_x_1, Ctx::None) { r = clamp_01(r - floor_(r)); }
2335STAGE(mirror_x_1, Ctx::None) { r = clamp_01(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }
Mike Klein9f85d682017-05-23 07:52:01 -04002336
Mike Reeddfc0e912018-02-16 12:40:18 -05002337// Decal stores a 32bit mask after checking the coordinate (x and/or y) against its domain:
2338// mask == 0x00000000 if the coordinate(s) are out of bounds
2339// mask == 0xFFFFFFFF if the coordinate(s) are in bounds
2340// After the gather stage, the r,g,b,a values are AND'd with this mask, setting them to 0
2341// if either of the coordinates were out of bounds.
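// For instance (illustrative): with limit_x = 8, a lane at r = 9.2 fails (r < w),
// cond_to_mask() stores 0x00000000 for that lane, and check_decal_mask() below ANDs
// its r,g,b,a bits to zero, leaving transparent black outside the decal.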
2342
Mike Kleinb11ab572018-10-24 06:42:14 -04002343STAGE(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
Mike Reeddfc0e912018-02-16 12:40:18 -05002344 auto w = ctx->limit_x;
Mike Klein7a177b42019-06-17 17:17:47 -05002345 sk_unaligned_store(ctx->mask, cond_to_mask((0 <= r) & (r < w)));
Mike Reeddfc0e912018-02-16 12:40:18 -05002346}
Mike Kleinb11ab572018-10-24 06:42:14 -04002347STAGE(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
Mike Reeddfc0e912018-02-16 12:40:18 -05002348 auto h = ctx->limit_y;
Mike Klein7a177b42019-06-17 17:17:47 -05002349 sk_unaligned_store(ctx->mask, cond_to_mask((0 <= g) & (g < h)));
Mike Reeddfc0e912018-02-16 12:40:18 -05002350}
Mike Kleinb11ab572018-10-24 06:42:14 -04002351STAGE(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
Mike Reeddfc0e912018-02-16 12:40:18 -05002352 auto w = ctx->limit_x;
2353 auto h = ctx->limit_y;
Mike Klein7a177b42019-06-17 17:17:47 -05002354 sk_unaligned_store(ctx->mask,
Mike Reeddfc0e912018-02-16 12:40:18 -05002355 cond_to_mask((0 <= r) & (r < w) & (0 <= g) & (g < h)));
2356}
Mike Kleinb11ab572018-10-24 06:42:14 -04002357STAGE(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
Mike Klein7a177b42019-06-17 17:17:47 -05002358 auto mask = sk_unaligned_load<U32>(ctx->mask);
Mike Reeddfc0e912018-02-16 12:40:18 -05002359 r = bit_cast<F>( bit_cast<U32>(r) & mask );
2360 g = bit_cast<F>( bit_cast<U32>(g) & mask );
2361 b = bit_cast<F>( bit_cast<U32>(b) & mask );
2362 a = bit_cast<F>( bit_cast<U32>(a) & mask );
2363}
2364
Mike Kleinb1df5e52018-10-17 17:06:03 -04002365STAGE(alpha_to_gray, Ctx::None) {
2366 r = g = b = a;
2367 a = 1;
2368}
2369STAGE(alpha_to_gray_dst, Ctx::None) {
2370 dr = dg = db = da;
2371 da = 1;
2372}
Mike Kleinda69d592019-07-11 07:38:31 -05002373STAGE(bt709_luminance_or_luma_to_alpha, Ctx::None) {
Mike Kleinfe560a82017-05-01 12:56:35 -04002374 a = r*0.2126f + g*0.7152f + b*0.0722f;
Mike Kleine9ed07d2017-03-07 12:28:11 -05002375 r = g = b = 0;
2376}
2377
Mike Kleinf7729c22017-09-27 11:42:30 -04002378STAGE(matrix_translate, const float* m) {
Mike Reed7aad8cc2017-07-05 12:33:06 -04002379 r += m[0];
2380 g += m[1];
2381}
Mike Kleinf7729c22017-09-27 11:42:30 -04002382STAGE(matrix_scale_translate, const float* m) {
Mike Kleinf04ff762017-10-20 15:50:12 -04002383 r = mad(r,m[0], m[2]);
2384 g = mad(g,m[1], m[3]);
Mike Reed7aad8cc2017-07-05 12:33:06 -04002385}
Mike Kleinf7729c22017-09-27 11:42:30 -04002386STAGE(matrix_2x3, const float* m) {
Mike Kleinb8d52752017-02-16 10:21:29 -05002387 auto R = mad(r,m[0], mad(g,m[2], m[4])),
2388 G = mad(r,m[1], mad(g,m[3], m[5]));
Mike Kleine1caee12017-02-15 13:31:12 -05002389 r = R;
2390 g = G;
2391}
Mike Kleinb82edcc2018-07-10 18:25:03 +00002392STAGE(matrix_3x3, const float* m) {
2393 auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
2394 G = mad(r,m[1], mad(g,m[4], b*m[7])),
2395 B = mad(r,m[2], mad(g,m[5], b*m[8]));
2396 r = R;
2397 g = G;
2398 b = B;
2399}
Mike Kleinf7729c22017-09-27 11:42:30 -04002400STAGE(matrix_3x4, const float* m) {
Mike Kleinb8d52752017-02-16 10:21:29 -05002401 auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
2402 G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
2403 B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
Mike Kleine1caee12017-02-15 13:31:12 -05002404 r = R;
2405 g = G;
2406 b = B;
2407}
Mike Kleinf7729c22017-09-27 11:42:30 -04002408STAGE(matrix_4x5, const float* m) {
Mike Reed361a6402019-04-23 12:19:00 -04002409 auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
2410 G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
2411 B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
2412 A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
Mike Kleine9ed07d2017-03-07 12:28:11 -05002413 r = R;
2414 g = G;
2415 b = B;
2416 a = A;
2417}
Mike Kleinf7729c22017-09-27 11:42:30 -04002418STAGE(matrix_4x3, const float* m) {
Mike Reed02640952017-05-19 15:32:13 -04002419 auto X = r,
2420 Y = g;
2421
2422 r = mad(X, m[0], mad(Y, m[4], m[ 8]));
2423 g = mad(X, m[1], mad(Y, m[5], m[ 9]));
2424 b = mad(X, m[2], mad(Y, m[6], m[10]));
2425 a = mad(X, m[3], mad(Y, m[7], m[11]));
2426}
Mike Kleinf7729c22017-09-27 11:42:30 -04002427STAGE(matrix_perspective, const float* m) {
Mike Klein11d2df02017-02-24 11:51:36 -05002428 // N.B. Unlike the other matrix_ stages, this matrix is row-major.
Mike Klein11d2df02017-02-24 11:51:36 -05002429 auto R = mad(r,m[0], mad(g,m[1], m[2])),
2430 G = mad(r,m[3], mad(g,m[4], m[5])),
2431 Z = mad(r,m[6], mad(g,m[7], m[8]));
2432 r = R * rcp(Z);
2433 g = G * rcp(Z);
2434}
Mike Kleine1caee12017-02-15 13:31:12 -05002435
Mike Kleinb11ab572018-10-24 06:42:14 -04002436SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
Herb Derby4de13042017-05-15 10:49:39 -04002437 F* r, F* g, F* b, F* a) {
2438 F fr, br, fg, bg, fb, bb, fa, ba;
Mike Klein106e17a2017-12-12 17:07:49 -05002439#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Herb Derby4de13042017-05-15 10:49:39 -04002440 if (c->stopCount <=8) {
2441 fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), idx);
2442 br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), idx);
2443 fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), idx);
2444 bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), idx);
2445 fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), idx);
2446 bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), idx);
2447 fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), idx);
2448 ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), idx);
2449 } else
2450#endif
2451 {
2452 fr = gather(c->fs[0], idx);
2453 br = gather(c->bs[0], idx);
2454 fg = gather(c->fs[1], idx);
2455 bg = gather(c->bs[1], idx);
2456 fb = gather(c->fs[2], idx);
2457 bb = gather(c->bs[2], idx);
2458 fa = gather(c->fs[3], idx);
2459 ba = gather(c->bs[3], idx);
Herb Derby7b4202d2017-04-10 10:52:34 -04002460 }
2461
Herb Derby4de13042017-05-15 10:49:39 -04002462 *r = mad(t, fr, br);
2463 *g = mad(t, fg, bg);
2464 *b = mad(t, fb, bb);
2465 *a = mad(t, fa, ba);
2466}
2467
Mike Kleinb11ab572018-10-24 06:42:14 -04002468STAGE(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
Herb Derby4de13042017-05-15 10:49:39 -04002469 auto t = r;
2470 auto idx = trunc_(t * (c->stopCount-1));
2471 gradient_lookup(c, idx, t, &r, &g, &b, &a);
2472}
2473
Mike Kleinb11ab572018-10-24 06:42:14 -04002474STAGE(gradient, const SkRasterPipeline_GradientCtx* c) {
Herb Derby4de13042017-05-15 10:49:39 -04002475 auto t = r;
2476 U32 idx = 0;
2477
2478 // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
2479 for (size_t i = 1; i < c->stopCount; i++) {
2480 idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
2481 }
2482
2483 gradient_lookup(c, idx, t, &r, &g, &b, &a);
Herb Derby7b4202d2017-04-10 10:52:34 -04002484}
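// For example (illustrative): with stops ts = {0.0, 0.25, 0.5, 1.0} and t = 0.3,
// only t >= ts[1] holds, so idx = 1 and gradient_lookup() interpolates using the
// slope/bias pair for that interval.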
2485
Mike Kleinf7729c22017-09-27 11:42:30 -04002486STAGE(evenly_spaced_2_stop_gradient, const void* ctx) {
Mike Kleinb11ab572018-10-24 06:42:14 -04002487 // TODO: Rename Ctx to SkRasterPipeline_EvenlySpaced2StopGradientCtx.
Herb Derby7b4202d2017-04-10 10:52:34 -04002488 struct Ctx { float f[4], b[4]; };
Mike Klein8a823fa2017-04-05 17:29:26 -04002489 auto c = (const Ctx*)ctx;
Mike Kleine1caee12017-02-15 13:31:12 -05002490
2491 auto t = r;
Herb Derby7b4202d2017-04-10 10:52:34 -04002492 r = mad(t, c->f[0], c->b[0]);
2493 g = mad(t, c->f[1], c->b[1]);
2494 b = mad(t, c->f[2], c->b[2]);
2495 a = mad(t, c->f[3], c->b[3]);
Mike Kleine1caee12017-02-15 13:31:12 -05002496}
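// Judging by the mad() form above, c->f presumably holds (end - begin) and c->b the
// begin color, so t=0 yields the begin color and t=1 yields the end color.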
Mike Klein0a904492017-04-12 12:52:48 -04002497
Mike Kleinf7729c22017-09-27 11:42:30 -04002498STAGE(xy_to_unit_angle, Ctx::None) {
Herb Derby7eb86982017-05-02 19:04:39 -04002499 F X = r,
2500 Y = g;
2501 F xabs = abs_(X),
2502 yabs = abs_(Y);
2503
2504 F slope = min(xabs, yabs)/max(xabs, yabs);
2505 F s = slope * slope;
2506
2507 // Use a 7th-degree polynomial to approximate atan.
2508 // This was generated using sollya.gforge.inria.fr.
2509 // A float-optimized polynomial was generated with the following command.
2510 // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
2511 F phi = slope
2512 * (0.15912117063999176025390625f + s
2513 * (-5.185396969318389892578125e-2f + s
2514 * (2.476101927459239959716796875e-2f + s
2515 * (-7.0547382347285747528076171875e-3f))));
2516
2517 phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
2518 phi = if_then_else(X < 0.0f , 1.0f/2.0f - phi, phi);
2519 phi = if_then_else(Y < 0.0f , 1.0f - phi , phi);
2520 phi = if_then_else(phi != phi , 0 , phi); // Check for NaN.
2521 r = phi;
2522}
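// Spot check (illustrative): at (X,Y) = (1,1), slope = 1 and the polynomial sums to
// about 0.125, i.e. 45 degrees expressed as a fraction of a full turn.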
2523
Mike Kleinf7729c22017-09-27 11:42:30 -04002524STAGE(xy_to_radius, Ctx::None) {
Herb Derby090fbf82017-05-08 15:10:36 -04002525 F X2 = r * r,
2526 Y2 = g * g;
Mike Kleinfd35c742017-05-15 15:55:54 -04002527 r = sqrt_(X2 + Y2);
Herb Derby090fbf82017-05-08 15:10:36 -04002528}
2529
Yuqian Lid208a882018-01-04 10:08:42 -05002530// Please see https://skia.org/dev/design/conical for how our 2pt conical shader works.
2531
2532STAGE(negate_x, Ctx::None) { r = -r; }
2533
Mike Kleinb11ab572018-10-24 06:42:14 -04002534STAGE(xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx* ctx) {
Yuqian Lid208a882018-01-04 10:08:42 -05002535 F x = r, y = g, &t = r;
2536 t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
2537}
2538
2539STAGE(xy_to_2pt_conical_focal_on_circle, Ctx::None) {
2540 F x = r, y = g, &t = r;
2541 t = x + y*y / x; // (x^2 + y^2) / x
2542}
2543
Mike Kleinb11ab572018-10-24 06:42:14 -04002544STAGE(xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx* ctx) {
Yuqian Lid208a882018-01-04 10:08:42 -05002545 F x = r, y = g, &t = r;
2546 t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
2547}
2548
Mike Kleinb11ab572018-10-24 06:42:14 -04002549STAGE(xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx* ctx) {
Yuqian Lid208a882018-01-04 10:08:42 -05002550 F x = r, y = g, &t = r;
2551 t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
2552}
2553
Mike Kleinb11ab572018-10-24 06:42:14 -04002554STAGE(xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx* ctx) {
Yuqian Lid208a882018-01-04 10:08:42 -05002555 F x = r, y = g, &t = r;
2556 t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
2557}
2558
Mike Kleinb11ab572018-10-24 06:42:14 -04002559STAGE(alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx* ctx) {
Yuqian Lid208a882018-01-04 10:08:42 -05002560 F& t = r;
2561 t = t + ctx->fP1; // ctx->fP1 = f
2562}
2563
2564STAGE(alter_2pt_conical_unswap, Ctx::None) {
2565 F& t = r;
2566 t = 1 - t;
2567}
2568
Mike Kleinb11ab572018-10-24 06:42:14 -04002569STAGE(mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx* c) {
Yuqian Lid208a882018-01-04 10:08:42 -05002570 F& t = r;
2571 auto is_degenerate = (t != t); // NaN
2572 t = if_then_else(is_degenerate, F(0), t);
Mike Klein7a177b42019-06-17 17:17:47 -05002573 sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
Yuqian Lid208a882018-01-04 10:08:42 -05002574}
2575
Mike Kleinb11ab572018-10-24 06:42:14 -04002576STAGE(mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx* c) {
Yuqian Lid208a882018-01-04 10:08:42 -05002577 F& t = r;
2578 auto is_degenerate = (t <= 0) | (t != t);
2579 t = if_then_else(is_degenerate, F(0), t);
Mike Klein7a177b42019-06-17 17:17:47 -05002580 sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
Yuqian Lid208a882018-01-04 10:08:42 -05002581}
2582
Mike Kleinf7729c22017-09-27 11:42:30 -04002583STAGE(apply_vector_mask, const uint32_t* ctx) {
Mike Klein7a177b42019-06-17 17:17:47 -05002584 const U32 mask = sk_unaligned_load<U32>(ctx);
Florin Malita9026fe12017-06-29 11:03:45 -04002585 r = bit_cast<F>(bit_cast<U32>(r) & mask);
2586 g = bit_cast<F>(bit_cast<U32>(g) & mask);
2587 b = bit_cast<F>(bit_cast<U32>(b) & mask);
2588 a = bit_cast<F>(bit_cast<U32>(a) & mask);
Florin Malita2e409002017-06-28 14:46:54 -04002589}
2590
Mike Kleinb11ab572018-10-24 06:42:14 -04002591STAGE(save_xy, SkRasterPipeline_SamplerCtx* c) {
Mike Klein0a904492017-04-12 12:52:48 -04002592 // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
2593 // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
2594 // surrounding (x,y) at (0.5,0.5) off-center.
Mike Kleinfe560a82017-05-01 12:56:35 -04002595 F fx = fract(r + 0.5f),
2596 fy = fract(g + 0.5f);
Mike Klein0a904492017-04-12 12:52:48 -04002597
2598 // Samplers will need to load x and fx, or y and fy.
Mike Klein7a177b42019-06-17 17:17:47 -05002599 sk_unaligned_store(c->x, r);
2600 sk_unaligned_store(c->y, g);
2601 sk_unaligned_store(c->fx, fx);
2602 sk_unaligned_store(c->fy, fy);
Mike Klein0a904492017-04-12 12:52:48 -04002603}
2604
Mike Kleinb11ab572018-10-24 06:42:14 -04002605STAGE(accumulate, const SkRasterPipeline_SamplerCtx* c) {
Mike Klein0a904492017-04-12 12:52:48 -04002606 // Bilinear and bicubic filters are both separable, so we produce independent contributions
2607 // from x and y, multiplying them together here to get each pixel's total scale factor.
Mike Klein7a177b42019-06-17 17:17:47 -05002608 auto scale = sk_unaligned_load<F>(c->scalex)
2609 * sk_unaligned_load<F>(c->scaley);
Mike Klein0a904492017-04-12 12:52:48 -04002610 dr = mad(scale, r, dr);
2611 dg = mad(scale, g, dg);
2612 db = mad(scale, b, db);
2613 da = mad(scale, a, da);
2614}
2615
2616// In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
2617// are combined in direct proportion to their area overlapping that logical query pixel.
2618// At positive offsets, the x-axis contribution to that rectangle is fx, or (1-fx) at negative x.
2619// The y-axis is symmetric.
2620
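// For example, if fx = 0.25 then the nx sample gets x-weight 1 - 0.25 = 0.75 and the px sample
// gets 0.25; the y-axis weights work the same way with fy.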
2621template <int kScale>
Mike Kleinb11ab572018-10-24 06:42:14 -04002622SI void bilinear_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
Mike Klein7a177b42019-06-17 17:17:47 -05002623 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
2624 F fx = sk_unaligned_load<F>(ctx->fx);
Mike Klein0a904492017-04-12 12:52:48 -04002625
2626 F scalex;
Mike Kleinfe560a82017-05-01 12:56:35 -04002627 if (kScale == -1) { scalex = 1.0f - fx; }
2628 if (kScale == +1) { scalex = fx; }
Mike Klein7a177b42019-06-17 17:17:47 -05002629 sk_unaligned_store(ctx->scalex, scalex);
Mike Klein0a904492017-04-12 12:52:48 -04002630}
2631template <int kScale>
Mike Kleinb11ab572018-10-24 06:42:14 -04002632SI void bilinear_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
Mike Klein7a177b42019-06-17 17:17:47 -05002633 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
2634 F fy = sk_unaligned_load<F>(ctx->fy);
Mike Klein0a904492017-04-12 12:52:48 -04002635
2636 F scaley;
Mike Kleinfe560a82017-05-01 12:56:35 -04002637 if (kScale == -1) { scaley = 1.0f - fy; }
2638 if (kScale == +1) { scaley = fy; }
Mike Klein7a177b42019-06-17 17:17:47 -05002639 sk_unaligned_store(ctx->scaley, scaley);
Mike Klein0a904492017-04-12 12:52:48 -04002640}
2641
Mike Kleinb11ab572018-10-24 06:42:14 -04002642STAGE(bilinear_nx, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<-1>(ctx, &r); }
2643STAGE(bilinear_px, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<+1>(ctx, &r); }
2644STAGE(bilinear_ny, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<-1>(ctx, &g); }
2645STAGE(bilinear_py, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<+1>(ctx, &g); }
Mike Klein0a904492017-04-12 12:52:48 -04002646
2647
2648// In bicubic interpolation, the 16 pixels at +/- 0.5 and +/- 1.5 offsets from the sample
2649// pixel center are combined with a non-uniform cubic filter, with higher values near the center.
2650//
2651// We break this function into two parts, one for near 0.5 offsets and one for far 1.5 offsets.
2652// See GrCubicEffect for details of this particular filter.
2653
2654SI F bicubic_near(F t) {
2655 // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
Mike Kleinfe560a82017-05-01 12:56:35 -04002656 return mad(t, mad(t, mad((-21/18.0f), t, (27/18.0f)), (9/18.0f)), (1/18.0f));
Mike Klein0a904492017-04-12 12:52:48 -04002657}
2658SI F bicubic_far(F t) {
2659 // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
Mike Kleinfe560a82017-05-01 12:56:35 -04002660 return (t*t)*mad((7/18.0f), t, (-6/18.0f));
Mike Klein0a904492017-04-12 12:52:48 -04002661}
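// For any t, the four weights bicubic_far(1-t), bicubic_near(1-t), bicubic_near(t), bicubic_far(t)
// sum to exactly 1, so the filter neither brightens nor darkens an image overall.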
2662
2663template <int kScale>
Mike Kleinb11ab572018-10-24 06:42:14 -04002664SI void bicubic_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
Mike Klein7a177b42019-06-17 17:17:47 -05002665 *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
2666 F fx = sk_unaligned_load<F>(ctx->fx);
Mike Klein0a904492017-04-12 12:52:48 -04002667
2668 F scalex;
Mike Kleinfe560a82017-05-01 12:56:35 -04002669 if (kScale == -3) { scalex = bicubic_far (1.0f - fx); }
2670 if (kScale == -1) { scalex = bicubic_near(1.0f - fx); }
2671 if (kScale == +1) { scalex = bicubic_near( fx); }
2672 if (kScale == +3) { scalex = bicubic_far ( fx); }
Mike Klein7a177b42019-06-17 17:17:47 -05002673 sk_unaligned_store(ctx->scalex, scalex);
Mike Klein0a904492017-04-12 12:52:48 -04002674}
2675template <int kScale>
Mike Kleinb11ab572018-10-24 06:42:14 -04002676SI void bicubic_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
Mike Klein7a177b42019-06-17 17:17:47 -05002677 *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
2678 F fy = sk_unaligned_load<F>(ctx->fy);
Mike Klein0a904492017-04-12 12:52:48 -04002679
2680 F scaley;
Mike Kleinfe560a82017-05-01 12:56:35 -04002681 if (kScale == -3) { scaley = bicubic_far (1.0f - fy); }
2682 if (kScale == -1) { scaley = bicubic_near(1.0f - fy); }
2683 if (kScale == +1) { scaley = bicubic_near( fy); }
2684 if (kScale == +3) { scaley = bicubic_far ( fy); }
Mike Klein7a177b42019-06-17 17:17:47 -05002685 sk_unaligned_store(ctx->scaley, scaley);
Mike Klein0a904492017-04-12 12:52:48 -04002686}
2687
Mike Kleinb11ab572018-10-24 06:42:14 -04002688STAGE(bicubic_n3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-3>(ctx, &r); }
2689STAGE(bicubic_n1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-1>(ctx, &r); }
2690STAGE(bicubic_p1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+1>(ctx, &r); }
2691STAGE(bicubic_p3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+3>(ctx, &r); }
Mike Klein0a904492017-04-12 12:52:48 -04002692
Mike Kleinb11ab572018-10-24 06:42:14 -04002693STAGE(bicubic_n3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-3>(ctx, &g); }
2694STAGE(bicubic_n1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-1>(ctx, &g); }
2695STAGE(bicubic_p1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+1>(ctx, &g); }
2696STAGE(bicubic_p3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+3>(ctx, &g); }
Mike Klein7fee90c2017-04-07 16:55:09 -04002697
Mike Kleinb11ab572018-10-24 06:42:14 -04002698STAGE(callback, SkRasterPipeline_CallbackCtx* c) {
Mike Kleinc17dc242017-04-20 16:21:57 -04002699 store4(c->rgba,0, r,g,b,a);
Mike Klein0e4d0962017-09-27 11:04:34 -04002700 c->fn(c, tail ? tail : N);
Mike Kleinc17dc242017-04-20 16:21:57 -04002701 load4(c->read_from,0, &r,&g,&b,&a);
Mike Klein7fee90c2017-04-07 16:55:09 -04002702}
Mike Kleinc2f876b2017-08-09 18:23:25 -04002703
Mike Reed019458d2019-07-17 12:23:24 -04002704// shader: void main(float x, float y, inout half4 color)
2705// colorfilter: void main(inout half4 color)
Brian Osman2b1a5442019-06-19 11:40:33 -04002706STAGE(interpreter, SkRasterPipeline_InterpreterCtx* c) {
Brian Osman4b202a32019-06-21 09:50:29 -04002707 // If N is less than the interpreter's VecWidth, then we are doing more work than necessary in
2708 // the interpreter. This is a known issue, and will be addressed at some point.
Mike Reed8c31f2b2019-07-16 16:50:14 -04002709 float xx[N], yy[N],
2710 rr[N], gg[N], bb[N], aa[N];
Brian Osman2b1a5442019-06-19 11:40:33 -04002711
Mike Reed019458d2019-07-17 12:23:24 -04002712 float* args[] = { xx, yy, rr, gg, bb, aa };
2713 float** in_args = args;
2714 int in_count = 6;
Mike Reed3fd3cc92019-06-20 12:40:30 -04002715
Mike Reed8c31f2b2019-07-16 16:50:14 -04002716 if (c->shaderConvention) {
2717 // our caller must have called seed_shader to set these
2718 sk_unaligned_store(xx, r);
2719 sk_unaligned_store(yy, g);
2720 sk_unaligned_store(rr, F(c->paintColor.fR));
2721 sk_unaligned_store(gg, F(c->paintColor.fG));
2722 sk_unaligned_store(bb, F(c->paintColor.fB));
2723 sk_unaligned_store(aa, F(c->paintColor.fA));
Mike Reed3fd3cc92019-06-20 12:40:30 -04002724 } else {
Mike Reed019458d2019-07-17 12:23:24 -04002725 in_args += 2; // skip x,y
2726 in_count = 4;
Mike Reed8c31f2b2019-07-16 16:50:14 -04002727 sk_unaligned_store(rr, r);
2728 sk_unaligned_store(gg, g);
Mike Reed3fd3cc92019-06-20 12:40:30 -04002729 sk_unaligned_store(bb, b);
2730 sk_unaligned_store(aa, a);
Mike Reed3fd3cc92019-06-20 12:40:30 -04002731 }
2732
Brian Osmanb23d66e2019-09-27 10:25:57 -04002733 SkAssertResult(c->byteCode->runStriped(c->fn, tail ? tail : N, in_args, in_count,
2734 nullptr, 0, (const float*)c->inputs, c->ninputs));
Brian Osman2b1a5442019-06-19 11:40:33 -04002735
2736 r = sk_unaligned_load<F>(rr);
2737 g = sk_unaligned_load<F>(gg);
2738 b = sk_unaligned_load<F>(bb);
2739 a = sk_unaligned_load<F>(aa);
2740}
2741
Mike Klein3cbcb732017-10-25 12:38:25 -04002742STAGE(gauss_a_to_rgba, Ctx::None) {
2743 // x = 1 - x;
2744 // exp(-x * x * 4) - 0.018f;
2745 // ... now approximate with quartic
2746 //
2747 const float c4 = -2.26661229133605957031f;
2748 const float c3 = 2.89795351028442382812f;
2749 const float c2 = 0.21345567703247070312f;
2750 const float c1 = 0.15489584207534790039f;
2751 const float c0 = 0.00030726194381713867f;
2752 a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
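    // Sanity check: at a=1 the coefficients sum to ~1.0, and at a=0 the result is c0 ≈ exp(-4) - 0.018.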
2753 r = a;
2754 g = a;
2755 b = a;
2756}
Mike Klein1fa9c432017-12-11 09:59:47 -05002757
Mike Klein01005622019-08-13 12:22:17 -04002758SI F tile(F v, SkTileMode mode, float limit, float invLimit) {
2759 // The ix_and_ptr() calls in sample() will clamp tile()'s output, so no need to clamp here.
2760 switch (mode) {
2761 case SkTileMode::kDecal: // TODO, for now fallthrough to clamp
2762 case SkTileMode::kClamp: return v;
2763 case SkTileMode::kRepeat: return v - floor_(v*invLimit)*limit;
2764 case SkTileMode::kMirror:
2765 return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
2766 }
2767 SkUNREACHABLE;
2768}
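// For example, with limit = 10: kRepeat maps v = 23 to 23 - floor(2.3)*10 = 3, and kMirror maps
// v = 13 to abs((13-10) - 20*floor(0.15) - 10) = abs(-7) = 7, i.e. 13 reflected back to 7.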
2769
2770SI void sample(const SkRasterPipeline_SamplerCtx2* ctx, F x, F y,
2771 F* r, F* g, F* b, F* a) {
2772 x = tile(x, ctx->tileX, ctx->width , ctx->invWidth );
2773 y = tile(y, ctx->tileY, ctx->height, ctx->invHeight);
2774
2775 switch (ctx->ct) {
2776 default: *r = *g = *b = *a = 0; // TODO
2777 break;
2778
2779 case kRGBA_8888_SkColorType:
2780 case kBGRA_8888_SkColorType: {
2781 const uint32_t* ptr;
2782 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
2783 from_8888(gather(ptr, ix), r,g,b,a);
2784 if (ctx->ct == kBGRA_8888_SkColorType) {
2785 std::swap(*r,*b);
2786 }
2787 } break;
2788 }
2789}
2790
2791template <int D>
2792SI void sampler(const SkRasterPipeline_SamplerCtx2* ctx,
2793 F cx, F cy, const F (&wx)[D], const F (&wy)[D],
2794 F* r, F* g, F* b, F* a) {
2795
2796 float start = -0.5f*(D-1);
2797
2798 *r = *g = *b = *a = 0;
2799 F y = cy + start;
2800 for (int j = 0; j < D; j++, y += 1.0f) {
2801 F x = cx + start;
2802 for (int i = 0; i < D; i++, x += 1.0f) {
2803 F R,G,B,A;
2804 sample(ctx, x,y, &R,&G,&B,&A);
2805
2806 F w = wx[i] * wy[j];
2807 *r = mad(w,R,*r);
2808 *g = mad(w,G,*g);
2809 *b = mad(w,B,*b);
2810 *a = mad(w,A,*a);
2811 }
2812 }
2813}
2814
2815STAGE(bilinear, const SkRasterPipeline_SamplerCtx2* ctx) {
2816 F x = r, fx = fract(x + 0.5f),
2817 y = g, fy = fract(y + 0.5f);
2818 const F wx[] = {1.0f - fx, fx};
2819 const F wy[] = {1.0f - fy, fy};
2820
2821 sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
2822}
2823STAGE(bicubic, SkRasterPipeline_SamplerCtx2* ctx) {
2824 F x = r, fx = fract(x + 0.5f),
2825 y = g, fy = fract(y + 0.5f);
2826 const F wx[] = { bicubic_far(1-fx), bicubic_near(1-fx), bicubic_near(fx), bicubic_far(fx) };
2827 const F wy[] = { bicubic_far(1-fy), bicubic_near(1-fy), bicubic_near(fy), bicubic_far(fy) };
2828
2829 sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
2830}
2831
Mike Kleinad82b402019-10-17 20:13:14 +00002832// A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
Mike Kleindfa1de92019-10-17 12:34:22 -05002833STAGE(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
Mike Kleinad82b402019-10-17 20:13:14 +00002834 // (cx,cy) are the center of our sample.
2835 F cx = r,
2836 cy = g;
Mike Kleindfa1de92019-10-17 12:34:22 -05002837
Mike Kleinad82b402019-10-17 20:13:14 +00002838 // All sample points are at the same fractional offset (fx,fy).
2839 // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
2840 F fx = fract(cx + 0.5f),
2841 fy = fract(cy + 0.5f);
2842
2843 // We'll accumulate the color of all four samples into {r,g,b,a} directly.
2844 r = g = b = a = 0;
2845
2846 for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
2847 for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
2848 // (x,y) are the coordinates of this sample point.
2849 F x = cx + dx,
2850 y = cy + dy;
2851
2852 // ix_and_ptr() will clamp to the image's bounds for us.
2853 const uint32_t* ptr;
2854 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
2855
2856 F sr,sg,sb,sa;
2857 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
2858
2859 // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
2860 // are combined in direct proportion to their area overlapping that logical query pixel.
2861 // At positive offsets, the x-axis contribution to that rectangle is fx,
2862 // or (1-fx) at negative x. Same deal for y.
2863 F sx = (dx > 0) ? fx : 1.0f - fx,
2864 sy = (dy > 0) ? fy : 1.0f - fy,
2865 area = sx * sy;
2866
2867 r += sr * area;
2868 g += sg * area;
2869 b += sb * area;
2870 a += sa * area;
2871 }
Mike Kleindfa1de92019-10-17 12:34:22 -05002872}
Mike Reed78eedba2019-07-31 16:39:15 -04002873
Mike Kleinad82b402019-10-17 20:13:14 +00002874// A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
2875STAGE(bicubic_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
2876 // (cx,cy) are the center of our sample.
2877 F cx = r,
2878 cy = g;
2879
2880 // All sample points are at the same fractional offset (fx,fy).
2881    // They're the 16 points of a 4x4 grid surrounding (x,y), at +/- 0.5 and +/- 1.5 offsets.
2882 F fx = fract(cx + 0.5f),
2883 fy = fract(cy + 0.5f);
2884
2885    // We'll accumulate the color of all sixteen samples into {r,g,b,a} directly.
2886 r = g = b = a = 0;
2887
2888 const F scaley[4] = {
2889 bicubic_far (1.0f - fy), bicubic_near(1.0f - fy),
2890 bicubic_near( fy), bicubic_far ( fy),
2891 };
2892 const F scalex[4] = {
2893 bicubic_far (1.0f - fx), bicubic_near(1.0f - fx),
2894 bicubic_near( fx), bicubic_far ( fx),
2895 };
2896
2897 F sample_y = cy - 1.5f;
2898 for (int yy = 0; yy <= 3; ++yy) {
2899 F sample_x = cx - 1.5f;
2900 for (int xx = 0; xx <= 3; ++xx) {
2901 F scale = scalex[xx] * scaley[yy];
2902
2903 // ix_and_ptr() will clamp to the image's bounds for us.
2904 const uint32_t* ptr;
2905 U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
2906
2907 F sr,sg,sb,sa;
2908 from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
2909
2910 r = mad(scale, sr, r);
2911 g = mad(scale, sg, g);
2912 b = mad(scale, sb, b);
2913 a = mad(scale, sa, a);
2914
2915 sample_x += 1;
2916 }
2917 sample_y += 1;
2918 }
Mike Reed78eedba2019-07-31 16:39:15 -04002919}
2920
Brian Salomon217522c2019-06-11 15:55:30 -04002921// ~~~~~~ GrSwizzle stage ~~~~~~ //
2922
2923STAGE(swizzle, void* ctx) {
2924 auto ir = r, ig = g, ib = b, ia = a;
2925 F* o[] = {&r, &g, &b, &a};
2926 char swiz[4];
2927 memcpy(swiz, &ctx, sizeof(swiz));
2928
2929 for (int i = 0; i < 4; ++i) {
2930 switch (swiz[i]) {
2931 case 'r': *o[i] = ir; break;
2932 case 'g': *o[i] = ig; break;
2933 case 'b': *o[i] = ib; break;
2934 case 'a': *o[i] = ia; break;
Brian Salomonf30b1c12019-06-20 12:25:02 -04002935 case '0': *o[i] = F(0); break;
Brian Salomon217522c2019-06-11 15:55:30 -04002936 case '1': *o[i] = F(1); break;
2937 default: break;
2938 }
2939 }
2940}
2941
Mike Klein1b9b7d52018-02-27 10:37:40 -05002942namespace lowp {
Mike Klein419709d2018-10-11 22:05:14 -04002943#if defined(JUMPER_IS_SCALAR) || defined(SK_DISABLE_LOWP_RASTER_PIPELINE)
Mike Klein1b9b7d52018-02-27 10:37:40 -05002944 // If we're not compiled by Clang, or otherwise switched into scalar mode (old Clang, manually),
2945 // we don't generate lowp stages. All these nullptrs will tell SkJumper.cpp to always use the
2946 // highp float pipeline.
2947 #define M(st) static void (*st)(void) = nullptr;
2948 SK_RASTER_PIPELINE_STAGES(M)
2949 #undef M
2950 static void (*just_return)(void) = nullptr;
2951
2952 static void start_pipeline(size_t,size_t,size_t,size_t, void**) {}
2953
2954#else // We are compiling vector code with Clang... let's make some lowp stages!
2955
Mike Klein83e86eb2018-08-31 10:19:21 -04002956#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Klein1b9b7d52018-02-27 10:37:40 -05002957 using U8 = uint8_t __attribute__((ext_vector_type(16)));
2958 using U16 = uint16_t __attribute__((ext_vector_type(16)));
2959 using I16 = int16_t __attribute__((ext_vector_type(16)));
2960 using I32 = int32_t __attribute__((ext_vector_type(16)));
2961 using U32 = uint32_t __attribute__((ext_vector_type(16)));
2962 using F = float __attribute__((ext_vector_type(16)));
2963#else
2964 using U8 = uint8_t __attribute__((ext_vector_type(8)));
2965 using U16 = uint16_t __attribute__((ext_vector_type(8)));
2966 using I16 = int16_t __attribute__((ext_vector_type(8)));
2967 using I32 = int32_t __attribute__((ext_vector_type(8)));
2968 using U32 = uint32_t __attribute__((ext_vector_type(8)));
2969 using F = float __attribute__((ext_vector_type(8)));
2970#endif
2971
2972static const size_t N = sizeof(U16) / sizeof(uint16_t);
2973
Mike Kleina46623b2018-03-10 10:27:24 -05002974// Once again, some platforms benefit from a restricted Stage calling convention,
2975// but others can pass tons and tons of registers and we're happy to exploit that.
2976// It's exactly the same decision and implementation strategy as the F stages above.
2977#if JUMPER_NARROW_STAGES
2978 struct Params {
2979 size_t dx, dy, tail;
2980 U16 dr,dg,db,da;
2981 };
2982 using Stage = void(ABI*)(Params*, void** program, U16 r, U16 g, U16 b, U16 a);
2983#else
2984 // We pass program as the second argument so that load_and_inc() will find it in %rsi on x86-64.
2985 using Stage = void (ABI*)(size_t tail, void** program, size_t dx, size_t dy,
2986 U16 r, U16 g, U16 b, U16 a,
2987 U16 dr, U16 dg, U16 db, U16 da);
2988#endif
Mike Klein1b9b7d52018-02-27 10:37:40 -05002989
2990static void start_pipeline(const size_t x0, const size_t y0,
2991 const size_t xlimit, const size_t ylimit, void** program) {
2992 auto start = (Stage)load_and_inc(program);
2993 for (size_t dy = y0; dy < ylimit; dy++) {
Mike Kleina46623b2018-03-10 10:27:24 -05002994 #if JUMPER_NARROW_STAGES
2995 Params params = { x0,dy,0, 0,0,0,0 };
2996 for (; params.dx + N <= xlimit; params.dx += N) {
2997 start(&params,program, 0,0,0,0);
2998 }
2999 if (size_t tail = xlimit - params.dx) {
3000 params.tail = tail;
3001 start(&params,program, 0,0,0,0);
3002 }
3003 #else
Mike Klein1b9b7d52018-02-27 10:37:40 -05003004 size_t dx = x0;
3005 for (; dx + N <= xlimit; dx += N) {
3006 start( 0,program,dx,dy, 0,0,0,0, 0,0,0,0);
3007 }
3008 if (size_t tail = xlimit - dx) {
3009 start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
3010 }
Mike Kleina46623b2018-03-10 10:27:24 -05003011 #endif
Mike Klein1b9b7d52018-02-27 10:37:40 -05003012 }
3013}
3014
Mike Kleina46623b2018-03-10 10:27:24 -05003015#if JUMPER_NARROW_STAGES
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003016 static void ABI just_return(Params*, void**, U16,U16,U16,U16) {}
Mike Kleina46623b2018-03-10 10:27:24 -05003017#else
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003018 static void ABI just_return(size_t,void**,size_t,size_t, U16,U16,U16,U16, U16,U16,U16,U16) {}
Mike Kleina46623b2018-03-10 10:27:24 -05003019#endif
Mike Klein1b9b7d52018-02-27 10:37:40 -05003020
3021// All stages use the same function call ABI to chain into each other, but there are three types:
3022// GG: geometry in, geometry out -- think, a matrix
3023// GP: geometry in, pixels out. -- think, a memory gather
3024// PP: pixels in, pixels out. -- think, a blend mode
3025//
3026// (Some stages ignore their inputs or produce no logical output. That's perfectly fine.)
3027//
3028// These three STAGE_ macros let you define each type of stage,
3029// and will have (x,y) geometry and/or (r,g,b,a, dr,dg,db,da) pixel arguments as appropriate.
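// In STAGE_GG and STAGE_GP below, the F-typed (x,y) geometry arrives packed across the four U16
// color registers: join<F>(r,g) and join<F>(b,a) reassemble x and y, and STAGE_GG uses split()
// to spread them back out before chaining to the next stage.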
3030
Mike Kleina46623b2018-03-10 10:27:24 -05003031#if JUMPER_NARROW_STAGES
Mike Klein8354c522018-12-19 10:45:14 -05003032 #define STAGE_GG(name, ...) \
Mike Klein4c249ff2019-03-18 11:57:58 -05003033 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y); \
Mike Klein8354c522018-12-19 10:45:14 -05003034 static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
3035 auto x = join<F>(r,g), \
3036 y = join<F>(b,a); \
Mike Klein4c249ff2019-03-18 11:57:58 -05003037 name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y); \
Mike Klein8354c522018-12-19 10:45:14 -05003038 split(x, &r,&g); \
3039 split(y, &b,&a); \
3040 auto next = (Stage)load_and_inc(program); \
3041 next(params,program, r,g,b,a); \
3042 } \
Mike Klein4c249ff2019-03-18 11:57:58 -05003043 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003044
Mike Kleina46623b2018-03-10 10:27:24 -05003045 #define STAGE_GP(name, ...) \
3046 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
3047 U16& r, U16& g, U16& b, U16& a, \
3048 U16& dr, U16& dg, U16& db, U16& da); \
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003049 static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
Mike Kleina46623b2018-03-10 10:27:24 -05003050 auto x = join<F>(r,g), \
3051 y = join<F>(b,a); \
3052 name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y, r,g,b,a, \
3053 params->dr,params->dg,params->db,params->da); \
3054 auto next = (Stage)load_and_inc(program); \
3055 next(params,program, r,g,b,a); \
3056 } \
3057 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
3058 U16& r, U16& g, U16& b, U16& a, \
3059 U16& dr, U16& dg, U16& db, U16& da)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003060
Mike Kleina46623b2018-03-10 10:27:24 -05003061 #define STAGE_PP(name, ...) \
Mike Klein4c249ff2019-03-18 11:57:58 -05003062 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
Mike Kleina46623b2018-03-10 10:27:24 -05003063 U16& r, U16& g, U16& b, U16& a, \
3064 U16& dr, U16& dg, U16& db, U16& da); \
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003065 static void ABI name(Params* params, void** program, U16 r, U16 g, U16 b, U16 a) { \
Mike Klein4c249ff2019-03-18 11:57:58 -05003066 name##_k(Ctx{program}, params->dx,params->dy,params->tail, r,g,b,a, \
Mike Kleina46623b2018-03-10 10:27:24 -05003067 params->dr,params->dg,params->db,params->da); \
3068 auto next = (Stage)load_and_inc(program); \
3069 next(params,program, r,g,b,a); \
3070 } \
Mike Klein4c249ff2019-03-18 11:57:58 -05003071 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
Mike Kleina46623b2018-03-10 10:27:24 -05003072 U16& r, U16& g, U16& b, U16& a, \
3073 U16& dr, U16& dg, U16& db, U16& da)
3074#else
3075 #define STAGE_GG(name, ...) \
Mike Klein4c249ff2019-03-18 11:57:58 -05003076 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y); \
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003077 static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
Mike Kleina46623b2018-03-10 10:27:24 -05003078 U16 r, U16 g, U16 b, U16 a, \
3079 U16 dr, U16 dg, U16 db, U16 da) { \
3080 auto x = join<F>(r,g), \
3081 y = join<F>(b,a); \
Mike Klein4c249ff2019-03-18 11:57:58 -05003082 name##_k(Ctx{program}, dx,dy,tail, x,y); \
Mike Kleina46623b2018-03-10 10:27:24 -05003083 split(x, &r,&g); \
3084 split(y, &b,&a); \
3085 auto next = (Stage)load_and_inc(program); \
3086 next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
3087 } \
Mike Klein4c249ff2019-03-18 11:57:58 -05003088 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F& x, F& y)
Mike Kleina46623b2018-03-10 10:27:24 -05003089
3090 #define STAGE_GP(name, ...) \
3091 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
3092 U16& r, U16& g, U16& b, U16& a, \
3093 U16& dr, U16& dg, U16& db, U16& da); \
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003094 static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
Mike Kleina46623b2018-03-10 10:27:24 -05003095 U16 r, U16 g, U16 b, U16 a, \
3096 U16 dr, U16 dg, U16 db, U16 da) { \
3097 auto x = join<F>(r,g), \
3098 y = join<F>(b,a); \
3099 name##_k(Ctx{program}, dx,dy,tail, x,y, r,g,b,a, dr,dg,db,da); \
3100 auto next = (Stage)load_and_inc(program); \
3101 next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
3102 } \
3103 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, F x, F y, \
3104 U16& r, U16& g, U16& b, U16& a, \
3105 U16& dr, U16& dg, U16& db, U16& da)
3106
3107 #define STAGE_PP(name, ...) \
Mike Klein4c249ff2019-03-18 11:57:58 -05003108 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
Mike Kleina46623b2018-03-10 10:27:24 -05003109 U16& r, U16& g, U16& b, U16& a, \
3110 U16& dr, U16& dg, U16& db, U16& da); \
Mike Klein4d4b3aa2018-03-21 13:07:35 -04003111 static void ABI name(size_t tail, void** program, size_t dx, size_t dy, \
Mike Kleina46623b2018-03-10 10:27:24 -05003112 U16 r, U16 g, U16 b, U16 a, \
3113 U16 dr, U16 dg, U16 db, U16 da) { \
Mike Klein4c249ff2019-03-18 11:57:58 -05003114 name##_k(Ctx{program}, dx,dy,tail, r,g,b,a, dr,dg,db,da); \
Mike Kleina46623b2018-03-10 10:27:24 -05003115 auto next = (Stage)load_and_inc(program); \
3116 next(tail,program,dx,dy, r,g,b,a, dr,dg,db,da); \
3117 } \
Mike Klein4c249ff2019-03-18 11:57:58 -05003118 SI void name##_k(__VA_ARGS__, size_t dx, size_t dy, size_t tail, \
Mike Kleina46623b2018-03-10 10:27:24 -05003119 U16& r, U16& g, U16& b, U16& a, \
3120 U16& dr, U16& dg, U16& db, U16& da)
3121#endif
Mike Klein1b9b7d52018-02-27 10:37:40 -05003122
3123// ~~~~~~ Commonly used helper functions ~~~~~~ //
3124
3125SI U16 div255(U16 v) {
3126#if 0
3127 return (v+127)/255; // The ideal rounding divide by 255.
Mike Klein73d7ffc2018-07-25 09:19:23 -04003128#elif 1 && defined(JUMPER_IS_NEON)
Mike Kleind8853ec2018-03-10 11:34:53 -05003129 // With NEON we can compute (v+127)/255 as (v + ((v+128)>>8) + 128)>>8
3130 // just as fast as we can do the approximation below, so might as well be correct!
3131 // First we compute v + ((v+128)>>8), then one more round of (...+128)>>8 to finish up.
3132 return vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8);
Mike Klein1b9b7d52018-02-27 10:37:40 -05003133#else
3134 return (v+255)/256; // A good approximation of (v+127)/255.
3135#endif
3136}
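// Sanity check for div255(): 255*255 = 65025, and both (65025+127)/255 and (65025+255)/256
// equal 255, so a fully opaque value scaled by full coverage stays 255 under either formula.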
3137
3138SI U16 inv(U16 v) { return 255-v; }
3139
3140SI U16 if_then_else(I16 c, U16 t, U16 e) { return (t & c) | (e & ~c); }
3141SI U32 if_then_else(I32 c, U32 t, U32 e) { return (t & c) | (e & ~c); }
3142
3143SI U16 max(U16 x, U16 y) { return if_then_else(x < y, y, x); }
3144SI U16 min(U16 x, U16 y) { return if_then_else(x < y, x, y); }
Mike Klein1b9b7d52018-02-27 10:37:40 -05003145
3146SI U16 from_float(float f) { return f * 255.0f + 0.5f; }
3147
3148SI U16 lerp(U16 from, U16 to, U16 t) { return div255( from*inv(t) + to*t ); }
3149
3150template <typename D, typename S>
3151SI D cast(S src) {
3152 return __builtin_convertvector(src, D);
3153}
3154
3155template <typename D, typename S>
3156SI void split(S v, D* lo, D* hi) {
3157 static_assert(2*sizeof(D) == sizeof(S), "");
3158 memcpy(lo, (const char*)&v + 0*sizeof(D), sizeof(D));
3159 memcpy(hi, (const char*)&v + 1*sizeof(D), sizeof(D));
3160}
3161template <typename D, typename S>
3162SI D join(S lo, S hi) {
3163 static_assert(sizeof(D) == 2*sizeof(S), "");
3164 D v;
3165 memcpy((char*)&v + 0*sizeof(S), &lo, sizeof(S));
3166 memcpy((char*)&v + 1*sizeof(S), &hi, sizeof(S));
3167 return v;
3168}
Mike Klein1b9b7d52018-02-27 10:37:40 -05003169
3170SI F if_then_else(I32 c, F t, F e) {
3171 return bit_cast<F>( (bit_cast<I32>(t) & c) | (bit_cast<I32>(e) & ~c) );
3172}
3173SI F max(F x, F y) { return if_then_else(x < y, y, x); }
3174SI F min(F x, F y) { return if_then_else(x < y, x, y); }
3175
3176SI F mad(F f, F m, F a) { return f*m+a; }
3177SI U32 trunc_(F x) { return (U32)cast<I32>(x); }
3178
3179SI F rcp(F x) {
Mike Klein83e86eb2018-08-31 10:19:21 -04003180#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Kleine304a8a2018-05-31 10:49:51 -04003181 __m256 lo,hi;
3182 split(x, &lo,&hi);
3183 return join<F>(_mm256_rcp_ps(lo), _mm256_rcp_ps(hi));
Mike Klein83e86eb2018-08-31 10:19:21 -04003184#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
Mike Kleine304a8a2018-05-31 10:49:51 -04003185 __m128 lo,hi;
3186 split(x, &lo,&hi);
3187 return join<F>(_mm_rcp_ps(lo), _mm_rcp_ps(hi));
Mike Klein73d7ffc2018-07-25 09:19:23 -04003188#elif defined(JUMPER_IS_NEON)
Mike Kleine304a8a2018-05-31 10:49:51 -04003189 auto rcp = [](float32x4_t v) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003190 auto est = vrecpeq_f32(v);
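        // vrecpsq_f32(v,est) computes (2 - v*est), so the next line is one Newton-Raphson refinement step.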
3191 return vrecpsq_f32(v,est)*est;
Mike Kleine304a8a2018-05-31 10:49:51 -04003192 };
3193 float32x4_t lo,hi;
3194 split(x, &lo,&hi);
3195 return join<F>(rcp(lo), rcp(hi));
Mike Klein1b9b7d52018-02-27 10:37:40 -05003196#else
3197 return 1.0f / x;
3198#endif
3199}
3200SI F sqrt_(F x) {
Mike Klein83e86eb2018-08-31 10:19:21 -04003201#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Kleine304a8a2018-05-31 10:49:51 -04003202 __m256 lo,hi;
3203 split(x, &lo,&hi);
3204 return join<F>(_mm256_sqrt_ps(lo), _mm256_sqrt_ps(hi));
Mike Klein83e86eb2018-08-31 10:19:21 -04003205#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
Mike Kleine304a8a2018-05-31 10:49:51 -04003206 __m128 lo,hi;
3207 split(x, &lo,&hi);
3208 return join<F>(_mm_sqrt_ps(lo), _mm_sqrt_ps(hi));
Mike Klein15eb1e92018-08-31 11:21:27 -04003209#elif defined(SK_CPU_ARM64)
Mike Kleine304a8a2018-05-31 10:49:51 -04003210 float32x4_t lo,hi;
3211 split(x, &lo,&hi);
3212 return join<F>(vsqrtq_f32(lo), vsqrtq_f32(hi));
Mike Klein73d7ffc2018-07-25 09:19:23 -04003213#elif defined(JUMPER_IS_NEON)
Mike Kleine304a8a2018-05-31 10:49:51 -04003214 auto sqrt = [](float32x4_t v) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003215 auto est = vrsqrteq_f32(v); // Estimate and two refinement steps for est = rsqrt(v).
3216 est *= vrsqrtsq_f32(v,est*est);
3217 est *= vrsqrtsq_f32(v,est*est);
3218 return v*est; // sqrt(v) == v*rsqrt(v).
Mike Kleine304a8a2018-05-31 10:49:51 -04003219 };
3220 float32x4_t lo,hi;
3221 split(x, &lo,&hi);
3222 return join<F>(sqrt(lo), sqrt(hi));
Mike Klein1b9b7d52018-02-27 10:37:40 -05003223#else
3224 return F{
3225 sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]),
3226 sqrtf(x[4]), sqrtf(x[5]), sqrtf(x[6]), sqrtf(x[7]),
3227 };
3228#endif
3229}
3230
3231SI F floor_(F x) {
Mike Klein15eb1e92018-08-31 11:21:27 -04003232#if defined(SK_CPU_ARM64)
Mike Kleine304a8a2018-05-31 10:49:51 -04003233 float32x4_t lo,hi;
3234 split(x, &lo,&hi);
3235 return join<F>(vrndmq_f32(lo), vrndmq_f32(hi));
Mike Klein83e86eb2018-08-31 10:19:21 -04003236#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Kleine304a8a2018-05-31 10:49:51 -04003237 __m256 lo,hi;
3238 split(x, &lo,&hi);
3239 return join<F>(_mm256_floor_ps(lo), _mm256_floor_ps(hi));
Mike Klein83e86eb2018-08-31 10:19:21 -04003240#elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
Mike Kleine304a8a2018-05-31 10:49:51 -04003241 __m128 lo,hi;
3242 split(x, &lo,&hi);
3243 return join<F>(_mm_floor_ps(lo), _mm_floor_ps(hi));
Mike Klein1b9b7d52018-02-27 10:37:40 -05003244#else
3245 F roundtrip = cast<F>(cast<I32>(x));
3246 return roundtrip - if_then_else(roundtrip > x, F(1), F(0));
3247#endif
3248}
Mike Klein8e3426f2018-04-16 12:56:24 -04003249SI F fract(F x) { return x - floor_(x); }
Mike Klein1b9b7d52018-02-27 10:37:40 -05003250SI F abs_(F x) { return bit_cast<F>( bit_cast<I32>(x) & 0x7fffffff ); }
3251
3252// ~~~~~~ Basic / misc. stages ~~~~~~ //
3253
Mike Kleine8de0242018-03-10 12:37:11 -05003254STAGE_GG(seed_shader, Ctx::None) {
3255 static const float iota[] = {
3256 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
3257 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
3258 };
Mike Klein7a177b42019-06-17 17:17:47 -05003259 x = cast<F>(I32(dx)) + sk_unaligned_load<F>(iota);
Mike Klein1b9b7d52018-02-27 10:37:40 -05003260 y = cast<F>(I32(dy)) + 0.5f;
3261}
3262
3263STAGE_GG(matrix_translate, const float* m) {
3264 x += m[0];
3265 y += m[1];
3266}
3267STAGE_GG(matrix_scale_translate, const float* m) {
3268 x = mad(x,m[0], m[2]);
3269 y = mad(y,m[1], m[3]);
3270}
3271STAGE_GG(matrix_2x3, const float* m) {
3272 auto X = mad(x,m[0], mad(y,m[2], m[4])),
3273 Y = mad(x,m[1], mad(y,m[3], m[5]));
3274 x = X;
3275 y = Y;
3276}
3277STAGE_GG(matrix_perspective, const float* m) {
3278 // N.B. Unlike the other matrix_ stages, this matrix is row-major.
3279 auto X = mad(x,m[0], mad(y,m[1], m[2])),
3280 Y = mad(x,m[3], mad(y,m[4], m[5])),
3281 Z = mad(x,m[6], mad(y,m[7], m[8]));
3282 x = X * rcp(Z);
3283 y = Y * rcp(Z);
3284}
3285
Mike Kleinb11ab572018-10-24 06:42:14 -04003286STAGE_PP(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003287 r = c->rgba[0];
3288 g = c->rgba[1];
3289 b = c->rgba[2];
3290 a = c->rgba[3];
3291}
Mike Reed9318a6c2019-08-16 16:16:25 -04003292STAGE_PP(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
3293 dr = c->rgba[0];
3294 dg = c->rgba[1];
3295 db = c->rgba[2];
3296 da = c->rgba[3];
3297}
Mike Klein1b9b7d52018-02-27 10:37:40 -05003298STAGE_PP(black_color, Ctx::None) { r = g = b = 0; a = 255; }
3299STAGE_PP(white_color, Ctx::None) { r = g = b = 255; a = 255; }
3300
3301STAGE_PP(set_rgb, const float rgb[3]) {
3302 r = from_float(rgb[0]);
3303 g = from_float(rgb[1]);
3304 b = from_float(rgb[2]);
3305}
3306
Mike Kleinea045b52018-08-23 12:13:58 -04003307STAGE_PP(clamp_0, Ctx::None) { /*definitely a noop*/ }
3308STAGE_PP(clamp_1, Ctx::None) { /*_should_ be a noop*/ }
3309
Mike Klein1b9b7d52018-02-27 10:37:40 -05003310STAGE_PP(clamp_a, Ctx::None) {
3311 r = min(r, a);
3312 g = min(g, a);
3313 b = min(b, a);
3314}
Mike Klein1b9b7d52018-02-27 10:37:40 -05003315
Mike Kleineb50f432018-09-07 11:08:53 -04003316STAGE_PP(clamp_gamut, Ctx::None) {
3317 // It shouldn't be possible to get out-of-gamut
3318 // colors when working in lowp.
3319}
3320
Mike Klein1b9b7d52018-02-27 10:37:40 -05003321STAGE_PP(premul, Ctx::None) {
3322 r = div255(r * a);
3323 g = div255(g * a);
3324 b = div255(b * a);
3325}
3326STAGE_PP(premul_dst, Ctx::None) {
3327 dr = div255(dr * da);
3328 dg = div255(dg * da);
3329 db = div255(db * da);
3330}
3331
3332STAGE_PP(force_opaque , Ctx::None) { a = 255; }
3333STAGE_PP(force_opaque_dst, Ctx::None) { da = 255; }
3334
3335STAGE_PP(swap_rb, Ctx::None) {
3336 auto tmp = r;
3337 r = b;
3338 b = tmp;
3339}
Mike Klein1a3eb522018-10-18 10:11:00 -04003340STAGE_PP(swap_rb_dst, Ctx::None) {
3341 auto tmp = dr;
3342 dr = db;
3343 db = tmp;
3344}
Mike Klein1b9b7d52018-02-27 10:37:40 -05003345
3346STAGE_PP(move_src_dst, Ctx::None) {
3347 dr = r;
3348 dg = g;
3349 db = b;
3350 da = a;
3351}
3352
3353STAGE_PP(move_dst_src, Ctx::None) {
3354 r = dr;
3355 g = dg;
3356 b = db;
3357 a = da;
3358}
3359
Mike Klein1b9b7d52018-02-27 10:37:40 -05003360// ~~~~~~ Blend modes ~~~~~~ //
3361
3362// The same logic applied to all 4 channels.
3363#define BLEND_MODE(name) \
3364 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
3365 STAGE_PP(name, Ctx::None) { \
3366 r = name##_channel(r,dr,a,da); \
3367 g = name##_channel(g,dg,a,da); \
3368 b = name##_channel(b,db,a,da); \
3369 a = name##_channel(a,da,a,da); \
3370 } \
3371 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
3372
3373 BLEND_MODE(clear) { return 0; }
3374 BLEND_MODE(srcatop) { return div255( s*da + d*inv(sa) ); }
3375 BLEND_MODE(dstatop) { return div255( d*sa + s*inv(da) ); }
3376 BLEND_MODE(srcin) { return div255( s*da ); }
3377 BLEND_MODE(dstin) { return div255( d*sa ); }
3378 BLEND_MODE(srcout) { return div255( s*inv(da) ); }
3379 BLEND_MODE(dstout) { return div255( d*inv(sa) ); }
3380 BLEND_MODE(srcover) { return s + div255( d*inv(sa) ); }
3381 BLEND_MODE(dstover) { return d + div255( s*inv(da) ); }
3382 BLEND_MODE(modulate) { return div255( s*d ); }
3383 BLEND_MODE(multiply) { return div255( s*inv(da) + d*inv(sa) + s*d ); }
Mike Kleinb90c0802019-03-15 14:03:41 +00003384 BLEND_MODE(plus_) { return min(s+d, 255); }
Mike Klein1b9b7d52018-02-27 10:37:40 -05003385 BLEND_MODE(screen) { return s + d - div255( s*d ); }
3386 BLEND_MODE(xor_) { return div255( s*inv(da) + d*inv(sa) ); }
3387#undef BLEND_MODE
3388
3389// The same logic applied to color, and srcover for alpha.
3390#define BLEND_MODE(name) \
3391 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
3392 STAGE_PP(name, Ctx::None) { \
3393 r = name##_channel(r,dr,a,da); \
3394 g = name##_channel(g,dg,a,da); \
3395 b = name##_channel(b,db,a,da); \
3396 a = a + div255( da*inv(a) ); \
3397 } \
3398 SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
3399
3400 BLEND_MODE(darken) { return s + d - div255( max(s*da, d*sa) ); }
3401 BLEND_MODE(lighten) { return s + d - div255( min(s*da, d*sa) ); }
3402 BLEND_MODE(difference) { return s + d - 2*div255( min(s*da, d*sa) ); }
3403 BLEND_MODE(exclusion) { return s + d - 2*div255( s*d ); }
3404
3405 BLEND_MODE(hardlight) {
3406 return div255( s*inv(da) + d*inv(sa) +
3407 if_then_else(2*s <= sa, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
3408 }
3409 BLEND_MODE(overlay) {
3410 return div255( s*inv(da) + d*inv(sa) +
3411 if_then_else(2*d <= da, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
3412 }
3413#undef BLEND_MODE
3414
3415// ~~~~~~ Helpers for interacting with memory ~~~~~~ //
3416
3417template <typename T>
Mike Kleinb11ab572018-10-24 06:42:14 -04003418SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003419 return (T*)ctx->pixels + dy*ctx->stride + dx;
3420}
3421
3422template <typename T>
Mike Kleinb11ab572018-10-24 06:42:14 -04003423SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003424 auto clamp = [](F v, F limit) {
3425 limit = bit_cast<F>( bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
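        // Subtracting 1 from the bit pattern of a positive finite float gives the next float toward
        // zero, i.e. the largest value strictly less than the exclusive limit.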
3426 return min(max(0, v), limit);
3427 };
3428 x = clamp(x, ctx->width);
3429 y = clamp(y, ctx->height);
3430
3431 *ptr = (const T*)ctx->pixels;
3432 return trunc_(y)*ctx->stride + trunc_(x);
3433}
3434
3435template <typename V, typename T>
3436SI V load(const T* ptr, size_t tail) {
3437 V v = 0;
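    // Each single-lane case below deliberately falls through to the next, filling one lane at a
    // time until a memcpy case (or case 0) copies the remaining aligned chunk and breaks.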
3438 switch (tail & (N-1)) {
3439 case 0: memcpy(&v, ptr, sizeof(v)); break;
Mike Klein83e86eb2018-08-31 10:19:21 -04003440 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003441 case 15: v[14] = ptr[14];
3442 case 14: v[13] = ptr[13];
3443 case 13: v[12] = ptr[12];
3444 case 12: memcpy(&v, ptr, 12*sizeof(T)); break;
3445 case 11: v[10] = ptr[10];
3446 case 10: v[ 9] = ptr[ 9];
3447 case 9: v[ 8] = ptr[ 8];
3448 case 8: memcpy(&v, ptr, 8*sizeof(T)); break;
3449 #endif
3450 case 7: v[ 6] = ptr[ 6];
3451 case 6: v[ 5] = ptr[ 5];
3452 case 5: v[ 4] = ptr[ 4];
3453 case 4: memcpy(&v, ptr, 4*sizeof(T)); break;
3454 case 3: v[ 2] = ptr[ 2];
3455 case 2: memcpy(&v, ptr, 2*sizeof(T)); break;
3456 case 1: v[ 0] = ptr[ 0];
3457 }
3458 return v;
3459}
3460template <typename V, typename T>
3461SI void store(T* ptr, size_t tail, V v) {
3462 switch (tail & (N-1)) {
3463 case 0: memcpy(ptr, &v, sizeof(v)); break;
Mike Klein83e86eb2018-08-31 10:19:21 -04003464 #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003465 case 15: ptr[14] = v[14];
3466 case 14: ptr[13] = v[13];
3467 case 13: ptr[12] = v[12];
3468 case 12: memcpy(ptr, &v, 12*sizeof(T)); break;
3469 case 11: ptr[10] = v[10];
3470 case 10: ptr[ 9] = v[ 9];
3471 case 9: ptr[ 8] = v[ 8];
3472 case 8: memcpy(ptr, &v, 8*sizeof(T)); break;
3473 #endif
3474 case 7: ptr[ 6] = v[ 6];
3475 case 6: ptr[ 5] = v[ 5];
3476 case 5: ptr[ 4] = v[ 4];
3477 case 4: memcpy(ptr, &v, 4*sizeof(T)); break;
3478 case 3: ptr[ 2] = v[ 2];
3479 case 2: memcpy(ptr, &v, 2*sizeof(T)); break;
3480 case 1: ptr[ 0] = v[ 0];
3481 }
3482}
3483
Mike Klein83e86eb2018-08-31 10:19:21 -04003484#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003485 template <typename V, typename T>
3486 SI V gather(const T* ptr, U32 ix) {
3487 return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
3488 ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]],
3489 ptr[ix[ 8]], ptr[ix[ 9]], ptr[ix[10]], ptr[ix[11]],
3490 ptr[ix[12]], ptr[ix[13]], ptr[ix[14]], ptr[ix[15]], };
3491 }
3492
3493 template<>
Kevin Lubickb5502b22018-03-12 10:17:06 -04003494 F gather(const float* ptr, U32 ix) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003495 __m256i lo, hi;
3496 split(ix, &lo, &hi);
3497
Kevin Lubickb5502b22018-03-12 10:17:06 -04003498 return join<F>(_mm256_i32gather_ps(ptr, lo, 4),
3499 _mm256_i32gather_ps(ptr, hi, 4));
Mike Klein1b9b7d52018-02-27 10:37:40 -05003500 }
3501
3502 template<>
Kevin Lubickb5502b22018-03-12 10:17:06 -04003503 U32 gather(const uint32_t* ptr, U32 ix) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003504 __m256i lo, hi;
3505 split(ix, &lo, &hi);
3506
Kevin Lubickb5502b22018-03-12 10:17:06 -04003507 return join<U32>(_mm256_i32gather_epi32(ptr, lo, 4),
3508 _mm256_i32gather_epi32(ptr, hi, 4));
Mike Klein1b9b7d52018-02-27 10:37:40 -05003509 }
3510#else
3511 template <typename V, typename T>
3512 SI V gather(const T* ptr, U32 ix) {
3513 return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
3514 ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]], };
3515 }
3516#endif
3517
3518
3519// ~~~~~~ 32-bit memory loads and stores ~~~~~~ //
3520
3521SI void from_8888(U32 rgba, U16* r, U16* g, U16* b, U16* a) {
Mike Klein83e86eb2018-08-31 10:19:21 -04003522#if 1 && defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003523 // Swap the middle 128-bit lanes to make _mm256_packus_epi32() in cast_U16() work out nicely.
3524 __m256i _01,_23;
3525 split(rgba, &_01, &_23);
3526 __m256i _02 = _mm256_permute2x128_si256(_01,_23, 0x20),
3527 _13 = _mm256_permute2x128_si256(_01,_23, 0x31);
3528 rgba = join<U32>(_02, _13);
3529
3530 auto cast_U16 = [](U32 v) -> U16 {
3531 __m256i _02,_13;
3532 split(v, &_02,&_13);
3533 return _mm256_packus_epi32(_02,_13);
3534 };
3535#else
3536 auto cast_U16 = [](U32 v) -> U16 {
3537 return cast<U16>(v);
3538 };
3539#endif
3540 *r = cast_U16(rgba & 65535) & 255;
3541 *g = cast_U16(rgba & 65535) >> 8;
3542 *b = cast_U16(rgba >> 16) & 255;
3543 *a = cast_U16(rgba >> 16) >> 8;
3544}
3545
3546SI void load_8888_(const uint32_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
Mike Klein73d7ffc2018-07-25 09:19:23 -04003547#if 1 && defined(JUMPER_IS_NEON)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003548 uint8x8x4_t rgba;
3549 switch (tail & (N-1)) {
3550 case 0: rgba = vld4_u8 ((const uint8_t*)(ptr+0) ); break;
3551 case 7: rgba = vld4_lane_u8((const uint8_t*)(ptr+6), rgba, 6);
3552 case 6: rgba = vld4_lane_u8((const uint8_t*)(ptr+5), rgba, 5);
3553 case 5: rgba = vld4_lane_u8((const uint8_t*)(ptr+4), rgba, 4);
3554 case 4: rgba = vld4_lane_u8((const uint8_t*)(ptr+3), rgba, 3);
3555 case 3: rgba = vld4_lane_u8((const uint8_t*)(ptr+2), rgba, 2);
3556 case 2: rgba = vld4_lane_u8((const uint8_t*)(ptr+1), rgba, 1);
3557 case 1: rgba = vld4_lane_u8((const uint8_t*)(ptr+0), rgba, 0);
3558 }
3559 *r = cast<U16>(rgba.val[0]);
3560 *g = cast<U16>(rgba.val[1]);
3561 *b = cast<U16>(rgba.val[2]);
3562 *a = cast<U16>(rgba.val[3]);
3563#else
3564 from_8888(load<U32>(ptr, tail), r,g,b,a);
3565#endif
3566}
3567SI void store_8888_(uint32_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
Mike Klein73d7ffc2018-07-25 09:19:23 -04003568#if 1 && defined(JUMPER_IS_NEON)
Mike Klein1b9b7d52018-02-27 10:37:40 -05003569 uint8x8x4_t rgba = {{
3570 cast<U8>(r),
3571 cast<U8>(g),
3572 cast<U8>(b),
3573 cast<U8>(a),
3574 }};
3575 switch (tail & (N-1)) {
3576 case 0: vst4_u8 ((uint8_t*)(ptr+0), rgba ); break;
3577 case 7: vst4_lane_u8((uint8_t*)(ptr+6), rgba, 6);
3578 case 6: vst4_lane_u8((uint8_t*)(ptr+5), rgba, 5);
3579 case 5: vst4_lane_u8((uint8_t*)(ptr+4), rgba, 4);
3580 case 4: vst4_lane_u8((uint8_t*)(ptr+3), rgba, 3);
3581 case 3: vst4_lane_u8((uint8_t*)(ptr+2), rgba, 2);
3582 case 2: vst4_lane_u8((uint8_t*)(ptr+1), rgba, 1);
3583 case 1: vst4_lane_u8((uint8_t*)(ptr+0), rgba, 0);
3584 }
3585#else
3586 store(ptr, tail, cast<U32>(r | (g<<8)) << 0
3587 | cast<U32>(b | (a<<8)) << 16);
3588#endif
3589}
3590
Mike Kleinb11ab572018-10-24 06:42:14 -04003591STAGE_PP(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003592 load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
3593}
Mike Kleinb11ab572018-10-24 06:42:14 -04003594STAGE_PP(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003595 load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
3596}
Mike Kleinb11ab572018-10-24 06:42:14 -04003597STAGE_PP(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003598 store_8888_(ptr_at_xy<uint32_t>(ctx, dx,dy), tail, r,g,b,a);
3599}
Mike Kleinb11ab572018-10-24 06:42:14 -04003600STAGE_GP(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003601 const uint32_t* ptr;
3602 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3603 from_8888(gather<U32>(ptr, ix), &r, &g, &b, &a);
3604}
Mike Klein1b9b7d52018-02-27 10:37:40 -05003605
3606// ~~~~~~ 16-bit memory loads and stores ~~~~~~ //
3607
3608SI void from_565(U16 rgb, U16* r, U16* g, U16* b) {
3609 // Format for 565 buffers: 15|rrrrr gggggg bbbbb|0
3610 U16 R = (rgb >> 11) & 31,
3611 G = (rgb >> 5) & 63,
3612 B = (rgb >> 0) & 31;
3613
3614 // These bit replications are the same as multiplying by 255/31 or 255/63 to scale to 8-bit.
3615 *r = (R << 3) | (R >> 2);
3616 *g = (G << 2) | (G >> 4);
3617 *b = (B << 3) | (B >> 2);
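    // e.g. R = 31 becomes (31<<3)|(31>>2) = 248|7 = 255, so 565 white maps exactly to 8-bit white.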
3618}
3619SI void load_565_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
3620 from_565(load<U16>(ptr, tail), r,g,b);
3621}
3622SI void store_565_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b) {
Mike Klein1c941432019-02-27 14:22:55 -06003623 // Round from [0,255] to [0,31] or [0,63], as if x * (31/255.0f) + 0.5f.
3624 // (Don't feel like you need to find some fundamental truth in these...
3625 // they were brute-force searched.)
3626 U16 R = (r * 9 + 36) / 74, // 9/74 ≈ 31/255, plus 36/74, about half.
3627 G = (g * 21 + 42) / 85, // 21/85 = 63/255 exactly.
3628 B = (b * 9 + 36) / 74;
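    // e.g. r = 255 gives (255*9+36)/74 = 31 and g = 255 gives (255*21+42)/85 = 63, so white round-trips.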
Mike Klein1b9b7d52018-02-27 10:37:40 -05003629 // Pack them back into 15|rrrrr gggggg bbbbb|0.
3630 store(ptr, tail, R << 11
3631 | G << 5
3632 | B << 0);
3633}
3634
Mike Kleinb11ab572018-10-24 06:42:14 -04003635STAGE_PP(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003636 load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b);
3637 a = 255;
3638}
Mike Kleinb11ab572018-10-24 06:42:14 -04003639STAGE_PP(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003640 load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db);
3641 da = 255;
3642}
Mike Kleinb11ab572018-10-24 06:42:14 -04003643STAGE_PP(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003644 store_565_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b);
3645}
Mike Kleinb11ab572018-10-24 06:42:14 -04003646STAGE_GP(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003647 const uint16_t* ptr;
3648 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3649 from_565(gather<U16>(ptr, ix), &r, &g, &b);
3650 a = 255;
3651}
3652
3653SI void from_4444(U16 rgba, U16* r, U16* g, U16* b, U16* a) {
3654 // Format for 4444 buffers: 15|rrrr gggg bbbb aaaa|0.
3655 U16 R = (rgba >> 12) & 15,
3656 G = (rgba >> 8) & 15,
3657 B = (rgba >> 4) & 15,
3658 A = (rgba >> 0) & 15;
3659
3660 // Scale [0,15] to [0,255].
3661 *r = (R << 4) | R;
3662 *g = (G << 4) | G;
3663 *b = (B << 4) | B;
3664 *a = (A << 4) | A;
3665}
3666SI void load_4444_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
3667 from_4444(load<U16>(ptr, tail), r,g,b,a);
3668}
3669SI void store_4444_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
Mike Klein1c941432019-02-27 14:22:55 -06003670 // Round from [0,255] to [0,15], producing the same value as (x*(15/255.0f) + 0.5f).
3671 U16 R = (r + 8) / 17,
3672 G = (g + 8) / 17,
3673 B = (b + 8) / 17,
3674 A = (a + 8) / 17;
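    // e.g. 255 -> (255+8)/17 = 15, and from_4444() maps 15 back to (15<<4)|15 = 255.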
Mike Klein1b9b7d52018-02-27 10:37:40 -05003675 // Pack them back into 15|rrrr gggg bbbb aaaa|0.
3676 store(ptr, tail, R << 12
3677 | G << 8
3678 | B << 4
3679 | A << 0);
3680}
3681
Mike Kleinb11ab572018-10-24 06:42:14 -04003682STAGE_PP(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003683 load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
3684}
Mike Kleinb11ab572018-10-24 06:42:14 -04003685STAGE_PP(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003686 load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
3687}
Mike Kleinb11ab572018-10-24 06:42:14 -04003688STAGE_PP(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003689 store_4444_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b,a);
3690}
Mike Kleinb11ab572018-10-24 06:42:14 -04003691STAGE_GP(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003692 const uint16_t* ptr;
3693 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3694 from_4444(gather<U16>(ptr, ix), &r,&g,&b,&a);
3695}
3696
Brian Salomon217522c2019-06-11 15:55:30 -04003697SI void from_88(U16 rg, U16* r, U16* g) {
3698 *r = (rg & 0xFF);
3699 *g = (rg >> 8);
3700}
3701
3702SI void load_88_(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
3703#if 1 && defined(JUMPER_IS_NEON)
3704 uint8x8x2_t rg;
3705 switch (tail & (N-1)) {
3706 case 0: rg = vld2_u8 ((const uint8_t*)(ptr+0) ); break;
3707 case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6);
3708 case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5);
3709 case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4);
3710 case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3);
3711 case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2);
3712 case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1);
3713 case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
3714 }
3715 *r = cast<U16>(rg.val[0]);
3716 *g = cast<U16>(rg.val[1]);
3717#else
3718 from_88(load<U16>(ptr, tail), r,g);
3719#endif
3720}
3721
3722SI void store_88_(uint16_t* ptr, size_t tail, U16 r, U16 g) {
3723#if 1 && defined(JUMPER_IS_NEON)
3724 uint8x8x2_t rg = {{
3725 cast<U8>(r),
3726 cast<U8>(g),
3727 }};
3728 switch (tail & (N-1)) {
3729 case 0: vst2_u8 ((uint8_t*)(ptr+0), rg ); break;
3730 case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6);
3731 case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5);
3732 case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4);
3733 case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3);
3734 case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2);
3735 case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1);
3736 case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
3737 }
3738#else
3739 store(ptr, tail, cast<U16>(r | (g<<8)) << 0);
3740#endif
3741}
3742
3743STAGE_PP(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
Robert Phillipsd470e1b2019-09-04 15:05:35 -04003744 load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &r, &g);
Brian Salomon217522c2019-06-11 15:55:30 -04003745 b = 0;
Brian Salomonf30b1c12019-06-20 12:25:02 -04003746 a = 255;
Robert Phillipsd470e1b2019-09-04 15:05:35 -04003747}
3748STAGE_PP(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
3749 load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &dr, &dg);
3750 db = 0;
3751 da = 255;
Brian Salomon217522c2019-06-11 15:55:30 -04003752}
3753STAGE_PP(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
3754 store_88_(ptr_at_xy<uint16_t>(ctx, dx, dy), tail, r, g);
3755}
Robert Phillipsd470e1b2019-09-04 15:05:35 -04003756STAGE_GP(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
3757 const uint16_t* ptr;
3758 U32 ix = ix_and_ptr(&ptr, ctx, x, y);
3759 from_88(gather<U16>(ptr, ix), &r, &g);
3760 b = 0;
3761 a = 255;
3762}
Brian Salomon217522c2019-06-11 15:55:30 -04003763
Mike Klein1b9b7d52018-02-27 10:37:40 -05003764// ~~~~~~ 8-bit memory loads and stores ~~~~~~ //
3765
3766SI U16 load_8(const uint8_t* ptr, size_t tail) {
3767 return cast<U16>(load<U8>(ptr, tail));
3768}
3769SI void store_8(uint8_t* ptr, size_t tail, U16 v) {
3770 store(ptr, tail, cast<U8>(v));
3771}
3772
Mike Kleinb11ab572018-10-24 06:42:14 -04003773STAGE_PP(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003774 r = g = b = 0;
3775 a = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
3776}
Mike Kleinb11ab572018-10-24 06:42:14 -04003777STAGE_PP(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003778 dr = dg = db = 0;
3779 da = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
3780}
Mike Kleinb11ab572018-10-24 06:42:14 -04003781STAGE_PP(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003782 store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, a);
3783}
Mike Kleinb11ab572018-10-24 06:42:14 -04003784STAGE_GP(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003785 const uint8_t* ptr;
3786 U32 ix = ix_and_ptr(&ptr, ctx, x,y);
3787 r = g = b = 0;
3788 a = cast<U16>(gather<U8>(ptr, ix));
3789}
3790
Mike Kleinb1df5e52018-10-17 17:06:03 -04003791STAGE_PP(alpha_to_gray, Ctx::None) {
3792 r = g = b = a;
Mike Klein1b9b7d52018-02-27 10:37:40 -05003793 a = 255;
3794}
Mike Kleinb1df5e52018-10-17 17:06:03 -04003795STAGE_PP(alpha_to_gray_dst, Ctx::None) {
3796 dr = dg = db = da;
Mike Klein1b9b7d52018-02-27 10:37:40 -05003797 da = 255;
3798}
Mike Kleinda69d592019-07-11 07:38:31 -05003799STAGE_PP(bt709_luminance_or_luma_to_alpha, Ctx::None) {
Mike Klein1b9b7d52018-02-27 10:37:40 -05003800 a = (r*54 + g*183 + b*19)/256; // 0.2126, 0.7152, 0.0722 with 256 denominator.
3801 r = g = b = 0;
3802}
Mike Klein1b9b7d52018-02-27 10:37:40 -05003803
3804// ~~~~~~ Saving and restoring the src and dst registers ~~~~~~ //
3805
Mike Reed895e1ee2019-03-16 13:16:54 -04003806STAGE_PP(load_src, const uint16_t* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05003807 r = sk_unaligned_load<U16>(ptr + 0*N);
3808 g = sk_unaligned_load<U16>(ptr + 1*N);
3809 b = sk_unaligned_load<U16>(ptr + 2*N);
3810 a = sk_unaligned_load<U16>(ptr + 3*N);
Mike Reed895e1ee2019-03-16 13:16:54 -04003811}
3812STAGE_PP(store_src, uint16_t* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05003813 sk_unaligned_store(ptr + 0*N, r);
3814 sk_unaligned_store(ptr + 1*N, g);
3815 sk_unaligned_store(ptr + 2*N, b);
3816 sk_unaligned_store(ptr + 3*N, a);
Mike Reed895e1ee2019-03-16 13:16:54 -04003817}
3818STAGE_PP(load_dst, const uint16_t* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05003819 dr = sk_unaligned_load<U16>(ptr + 0*N);
3820 dg = sk_unaligned_load<U16>(ptr + 1*N);
3821 db = sk_unaligned_load<U16>(ptr + 2*N);
3822 da = sk_unaligned_load<U16>(ptr + 3*N);
Mike Reed895e1ee2019-03-16 13:16:54 -04003823}
3824STAGE_PP(store_dst, uint16_t* ptr) {
Mike Klein7a177b42019-06-17 17:17:47 -05003825 sk_unaligned_store(ptr + 0*N, dr);
3826 sk_unaligned_store(ptr + 1*N, dg);
3827 sk_unaligned_store(ptr + 2*N, db);
3828 sk_unaligned_store(ptr + 3*N, da);
Mike Reed895e1ee2019-03-16 13:16:54 -04003829}
3830
3831// ~~~~~~ Coverage scales / lerps ~~~~~~ //
3832
Mike Klein1b9b7d52018-02-27 10:37:40 -05003833STAGE_PP(scale_1_float, const float* f) {
3834 U16 c = from_float(*f);
3835 r = div255( r * c );
3836 g = div255( g * c );
3837 b = div255( b * c );
3838 a = div255( a * c );
3839}
3840STAGE_PP(lerp_1_float, const float* f) {
3841 U16 c = from_float(*f);
3842 r = lerp(dr, r, c);
3843 g = lerp(dg, g, c);
3844 b = lerp(db, b, c);
3845 a = lerp(da, a, c);
3846}
Mike Reed895e1ee2019-03-16 13:16:54 -04003847STAGE_PP(lerp_native, const uint16_t scales[]) {
Mike Klein7a177b42019-06-17 17:17:47 -05003848 auto c = sk_unaligned_load<U16>(scales);
Mike Reed895e1ee2019-03-16 13:16:54 -04003849 r = lerp(dr, r, c);
3850 g = lerp(dg, g, c);
3851 b = lerp(db, b, c);
3852 a = lerp(da, a, c);
3853}
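// Note on the lerps above: lerp(dst, src, c) blends from dst toward src by coverage,
// so full coverage (c == 255) keeps the src value and zero coverage keeps dst.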

STAGE_PP(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
    r = div255( r * c );
    g = div255( g * c );
    b = div255( b * c );
    a = div255( a * c );
}
STAGE_PP(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}

// Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
SI U16 alpha_coverage_from_rgb_coverage(U16 a, U16 da, U16 cr, U16 cg, U16 cb) {
    return if_then_else(a < da, min(cr, min(cg,cb))
                              , max(cr, max(cg,cb)));
}
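// Illustrative example: with per-channel coverage cr=10, cg=20, cb=30, the alpha
// coverage is ca = 10 (the min) when a < da, and ca = 30 (the max) otherwise.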
STAGE_PP(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 cr,cg,cb;
    load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
    U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);

    r = div255( r * cr );
    g = div255( g * cg );
    b = div255( b * cb );
    a = div255( a * ca );
}
STAGE_PP(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 cr,cg,cb;
    load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
    U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);

    r = lerp(dr, r, cr);
    g = lerp(dg, g, cg);
    b = lerp(db, b, cb);
    a = lerp(da, a, ca);
}

STAGE_PP(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
    U16 mul = load_8(ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy), tail),
        add = load_8(ptr_at_xy<const uint8_t>(&ctx->add, dx,dy), tail);

    r = min(div255(r*mul) + add, a);
    g = min(div255(g*mul) + add, a);
    b = min(div255(b*mul) + add, a);
}
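// Clamping each emboss channel to a above keeps the color premultiplied-valid
// (no channel exceeds alpha), even after the additive term.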


// ~~~~~~ Gradient stages ~~~~~~ //

// Clamp x to [0,1], both sides inclusive (think, gradients).
// Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
SI F clamp_01(F v) { return min(max(0, v), 1); }

STAGE_GG(clamp_x_1 , Ctx::None) { x = clamp_01(x); }
STAGE_GG(repeat_x_1, Ctx::None) { x = clamp_01(x - floor_(x)); }
STAGE_GG(mirror_x_1, Ctx::None) {
    auto two = [](F x){ return x+x; };
    x = clamp_01(abs_( (x-1.0f) - two(floor_((x-1.0f)*0.5f)) - 1.0f ));
}
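// mirror_x_1 worked through (illustrative): x = 1.25 gives (x-1) = 0.25,
// floor(0.25*0.5) = 0, so abs(0.25 - 0 - 1) = 0.75 -- i.e. 1.25 mirrored about 1 is 0.75.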

SI I16 cond_to_mask_16(I32 cond) { return cast<I16>(cond); }

STAGE_GG(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
    auto w = ctx->limit_x;
    sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w)));
}
STAGE_GG(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
    auto h = ctx->limit_y;
    sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= y) & (y < h)));
}
STAGE_GG(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
    auto w = ctx->limit_x;
    auto h = ctx->limit_y;
    sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w) & (0 <= y) & (y < h)));
}
STAGE_PP(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
    auto mask = sk_unaligned_load<U16>(ctx->mask);
    r = r & mask;
    g = g & mask;
    b = b & mask;
    a = a & mask;
}
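// A lane-wise comparison yields all-ones (-1) for true and 0 for false, so
// cond_to_mask_16() produces 0xffff or 0x0000 per lane; check_decal_mask then
// leaves in-bounds lanes untouched (x & 0xffff == x) and zeroes out-of-bounds lanes.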

SI void round_F_to_U16(F R, F G, F B, F A, bool interpolatedInPremul,
                       U16* r, U16* g, U16* b, U16* a) {
    auto round = [](F x) { return cast<U16>(x * 255.0f + 0.5f); };

    F limit = interpolatedInPremul ? A
                                   : 1;
    *r = round(min(max(0,R), limit));
    *g = round(min(max(0,G), limit));
    *b = round(min(max(0,B), limit));
    *a = round(A);  // we assume alpha is already in [0,1].
}
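// round() above is round-to-nearest in 8-bit: e.g. 1.0f -> cast<U16>(255.5f) = 255,
// and 0.5f -> cast<U16>(128.0f) = 128.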

SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
                        U16* r, U16* g, U16* b, U16* a) {

    F fr, fg, fb, fa, br, bg, bb, ba;
#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_AVX512)
    if (c->stopCount <=8) {
        __m256i lo, hi;
        split(idx, &lo, &hi);

        fr = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), hi));
        br = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), hi));
        fg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), hi));
        bg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), hi));
        fb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), hi));
        bb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), hi));
        fa = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), hi));
        ba = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), hi));
    } else
#endif
    {
        fr = gather<F>(c->fs[0], idx);
        fg = gather<F>(c->fs[1], idx);
        fb = gather<F>(c->fs[2], idx);
        fa = gather<F>(c->fs[3], idx);
        br = gather<F>(c->bs[0], idx);
        bg = gather<F>(c->bs[1], idx);
        bb = gather<F>(c->bs[2], idx);
        ba = gather<F>(c->bs[3], idx);
    }
    round_F_to_U16(mad(t, fr, br),
                   mad(t, fg, bg),
                   mad(t, fb, bb),
                   mad(t, fa, ba),
                   c->interpolatedInPremul,
                   r,g,b,a);
}

STAGE_GP(gradient, const SkRasterPipeline_GradientCtx* c) {
    auto t = x;
    U32 idx = 0;

    // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
    for (size_t i = 1; i < c->stopCount; i++) {
        idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
    }

    gradient_lookup(c, idx, t, &r, &g, &b, &a);
}
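// Stop-search example (illustrative): with stops ts = {0, 0.5, 1} and t = 0.75,
// only t >= ts[1] holds in the loop above, so idx == 1 selects the second segment.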

STAGE_GP(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
    auto t = x;
    auto idx = trunc_(t * (c->stopCount-1));
    gradient_lookup(c, idx, t, &r, &g, &b, &a);
}

STAGE_GP(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
    auto t = x;
    round_F_to_U16(mad(t, c->f[0], c->b[0]),
                   mad(t, c->f[1], c->b[1]),
                   mad(t, c->f[2], c->b[2]),
                   mad(t, c->f[3], c->b[3]),
                   c->interpolatedInPremul,
                   &r,&g,&b,&a);
}

STAGE_GG(xy_to_unit_angle, Ctx::None) {
    F xabs = abs_(x),
      yabs = abs_(y);

    F slope = min(xabs, yabs)/max(xabs, yabs);
    F s = slope * slope;

    // Use a 7th degree polynomial to approximate atan.
    // This was generated using sollya.gforge.inria.fr.
    // A float optimized polynomial was generated using the following command.
    // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
    F phi = slope
             * (0.15912117063999176025390625f + s
             * (-5.185396969318389892578125e-2f + s
             * (2.476101927459239959716796875e-2f + s
             * (-7.0547382347285747528076171875e-3f))));

    phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
    phi = if_then_else(x < 0.0f   , 1.0f/2.0f - phi, phi);
    phi = if_then_else(y < 0.0f   , 1.0f - phi     , phi);
    phi = if_then_else(phi != phi , 0              , phi);  // Check for NaN.
    x = phi;
}
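// Spot check of the polynomial (illustrative): at slope == 1,
// 0.159121 - 0.051854 + 0.024761 - 0.007055 ~= 0.12497, matching atan(1)/(2*pi) = 1/8.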
STAGE_GG(xy_to_radius, Ctx::None) {
    x = sqrt_(x*x + y*y);
}

// ~~~~~~ Compound stages ~~~~~~ //

STAGE_PP(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);

    load_8888_(ptr, tail, &dr,&dg,&db,&da);
    r = r + div255( dr*inv(a) );
    g = g + div255( dg*inv(a) );
    b = b + div255( db*inv(a) );
    a = a + div255( da*inv(a) );
    store_8888_(ptr, tail, r,g,b,a);
}
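// This is the classic srcover blend, S + D*(1 - Sa), in 8-bit fixed point
// (inv() being the 8-bit complement here); e.g. an opaque source (a == 255)
// contributes no dst term, so the loaded dst values are simply overwritten.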

#if defined(SK_DISABLE_LOWP_BILERP_CLAMP_CLAMP_STAGE)
    static void(*bilerp_clamp_8888)(void) = nullptr;
    static void(*bilinear)(void) = nullptr;
#else
STAGE_GP(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
    // (cx,cy) are the center of our sample.
    F cx = x,
      cy = y;

    // All sample points are at the same fractional offset (fx,fy).
    // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
    F fx = fract(cx + 0.5f),
      fy = fract(cy + 0.5f);

    // We'll accumulate the color of all four samples into {r,g,b,a} directly.
    r = g = b = a = 0;

    // The first three sample points will calculate their area using math
    // just like in the float code above, but the fourth will take up all the rest.
    //
    // Logically this is the same as doing the math for the fourth pixel too,
    // but rounding error makes this a better strategy, keeping opaque opaque, etc.
    //
    // We can keep up to 8 bits of fractional precision without overflowing 16-bit,
    // so our "1.0" area is 256.
    const uint16_t bias = 256;
    U16 remaining = bias;

    for (float dy = -0.5f; dy <= +0.5f; dy += 1.0f)
    for (float dx = -0.5f; dx <= +0.5f; dx += 1.0f) {
        // (x,y) are the coordinates of this sample point.
        F x = cx + dx,
          y = cy + dy;

        // ix_and_ptr() will clamp to the image's bounds for us.
        const uint32_t* ptr;
        U32 ix = ix_and_ptr(&ptr, ctx, x,y);

        U16 sr,sg,sb,sa;
        from_8888(gather<U32>(ptr, ix), &sr,&sg,&sb,&sa);

        // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
        // are combined in direct proportion to their area overlapping that logical query pixel.
        // At positive offsets, the x-axis contribution to that rectangle is fx,
        // or (1-fx) at negative x. Same deal for y.
        F sx = (dx > 0) ? fx : 1.0f - fx,
          sy = (dy > 0) ? fy : 1.0f - fy;

        U16 area = (dy == 0.5f && dx == 0.5f) ? remaining
                                              : cast<U16>(sx * sy * bias);
        for (size_t i = 0; i < N; i++) {
            SkASSERT(remaining[i] >= area[i]);
        }
        remaining -= area;

        r += sr * area;
        g += sg * area;
        b += sb * area;
        a += sa * area;
    }

    r = (r + bias/2) / bias;
    g = (g + bias/2) / bias;
    b = (b + bias/2) / bias;
    a = (a + bias/2) / bias;
}
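// Area bookkeeping example (illustrative): with fx = fy = 0.25 the four taps get
// areas 144, 48, 48, and then remaining = 256 - 240 = 16, which matches
// 0.25*0.25*256 exactly -- the per-pixel areas always sum to bias.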

// TODO: lowp::tile() is identical to the highp tile()... share?
SI F tile(F v, SkTileMode mode, float limit, float invLimit) {
    // ix_and_ptr() will clamp the output of tile() afterward, so we need not clamp here.
    switch (mode) {
        case SkTileMode::kDecal:  // TODO, for now fallthrough to clamp
        case SkTileMode::kClamp:  return v;
        case SkTileMode::kRepeat: return v - floor_(v*invLimit)*limit;
        case SkTileMode::kMirror:
            return abs_( (v-limit) - (limit+limit)*floor_((v-limit)*(invLimit*0.5f)) - limit );
    }
    SkUNREACHABLE;
}
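// Tiling worked through (illustrative), limit = 2, invLimit = 0.5:
//   kRepeat: v = 2.5 -> 2.5 - floor(1.25)*2 = 0.5
//   kMirror: v = 2.5 -> abs(0.5 - 4*floor(0.5*0.25) - 2) = 1.5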

SI void sample(const SkRasterPipeline_SamplerCtx2* ctx, F x, F y,
               U16* r, U16* g, U16* b, U16* a) {
    x = tile(x, ctx->tileX, ctx->width , ctx->invWidth );
    y = tile(y, ctx->tileY, ctx->height, ctx->invHeight);

    switch (ctx->ct) {
        default: *r = *g = *b = *a = 0;  // TODO
                 break;

        case kRGBA_8888_SkColorType:
        case kBGRA_8888_SkColorType: {
            const uint32_t* ptr;
            U32 ix = ix_and_ptr(&ptr, ctx, x,y);
            from_8888(gather<U32>(ptr, ix), r,g,b,a);
            if (ctx->ct == kBGRA_8888_SkColorType) {
                std::swap(*r,*b);
            }
        } break;
    }
}

template <int D>
SI void sampler(const SkRasterPipeline_SamplerCtx2* ctx,
                F cx, F cy, const F (&wx)[D], const F (&wy)[D],
                U16* r, U16* g, U16* b, U16* a) {

    float start = -0.5f*(D-1);

    const uint16_t bias = 256;
    U16 remaining = bias;

    *r = *g = *b = *a = 0;
    F y = cy + start;
    for (int j = 0; j < D; j++, y += 1.0f) {
        F x = cx + start;
        for (int i = 0; i < D; i++, x += 1.0f) {
            U16 R,G,B,A;
            sample(ctx, x,y, &R,&G,&B,&A);

            U16 w = (i == D-1 && j == D-1) ? remaining
                                           : cast<U16>(wx[i]*wy[j]*bias);
            remaining -= w;
            *r += w*R;
            *g += w*G;
            *b += w*B;
            *a += w*A;
        }
    }
    *r = (*r + bias/2) / bias;
    *g = (*g + bias/2) / bias;
    *b = (*b + bias/2) / bias;
    *a = (*a + bias/2) / bias;
}

STAGE_GP(bilinear, const SkRasterPipeline_SamplerCtx2* ctx) {
    F fx = fract(x + 0.5f),
      fy = fract(y + 0.5f);
    const F wx[] = {1.0f - fx, fx};
    const F wy[] = {1.0f - fy, fy};

    sampler(ctx, x,y, wx,wy, &r,&g,&b,&a);
}
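// With D == 2 these weights reproduce the bilerp_clamp_8888 math above: wx and wy
// each sum to 1, so the integer weights wx[i]*wy[j]*bias account for the full bias,
// with the last tap taking `remaining` to absorb any rounding shortfall.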
#endif

// ~~~~~~ GrSwizzle stage ~~~~~~ //

STAGE_PP(swizzle, void* ctx) {
    auto ir = r, ig = g, ib = b, ia = a;
    U16* o[] = {&r, &g, &b, &a};
    char swiz[4];
    memcpy(swiz, &ctx, sizeof(swiz));

    for (int i = 0; i < 4; ++i) {
        switch (swiz[i]) {
            case 'r': *o[i] = ir;       break;
            case 'g': *o[i] = ig;       break;
            case 'b': *o[i] = ib;       break;
            case 'a': *o[i] = ia;       break;
            case '0': *o[i] = U16(0);   break;
            case '1': *o[i] = U16(255); break;
            default:                    break;
        }
    }
}
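// The four swizzle characters are packed directly into the bits of the context
// pointer (hence the memcpy from &ctx); e.g. a swizzle of "bgra" swaps r and b,
// and a '1' forces that output channel to full 255.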

// Now we'll add null stand-ins for stages we haven't implemented in lowp.
// If a pipeline uses these stages, it'll boot it out of lowp into highp.
#define NOT_IMPLEMENTED(st) static void (*st)(void) = nullptr;
    NOT_IMPLEMENTED(callback)
    NOT_IMPLEMENTED(interpreter)
    NOT_IMPLEMENTED(unbounded_set_rgb)
    NOT_IMPLEMENTED(unbounded_uniform_color)
    NOT_IMPLEMENTED(unpremul)
    NOT_IMPLEMENTED(dither)  // TODO
    NOT_IMPLEMENTED(from_srgb)
    NOT_IMPLEMENTED(to_srgb)
    NOT_IMPLEMENTED(load_16161616)
    NOT_IMPLEMENTED(load_16161616_dst)
    NOT_IMPLEMENTED(store_16161616)
    NOT_IMPLEMENTED(gather_16161616)
    NOT_IMPLEMENTED(load_a16)
    NOT_IMPLEMENTED(load_a16_dst)
    NOT_IMPLEMENTED(store_a16)
    NOT_IMPLEMENTED(gather_a16)
    NOT_IMPLEMENTED(load_rg1616)
    NOT_IMPLEMENTED(load_rg1616_dst)
    NOT_IMPLEMENTED(store_rg1616)
    NOT_IMPLEMENTED(gather_rg1616)
    NOT_IMPLEMENTED(load_f16)
    NOT_IMPLEMENTED(load_f16_dst)
    NOT_IMPLEMENTED(store_f16)
    NOT_IMPLEMENTED(gather_f16)
    NOT_IMPLEMENTED(load_af16)
    NOT_IMPLEMENTED(load_af16_dst)
    NOT_IMPLEMENTED(store_af16)
    NOT_IMPLEMENTED(gather_af16)
    NOT_IMPLEMENTED(load_rgf16)
    NOT_IMPLEMENTED(load_rgf16_dst)
    NOT_IMPLEMENTED(store_rgf16)
    NOT_IMPLEMENTED(gather_rgf16)
    NOT_IMPLEMENTED(load_f32)
    NOT_IMPLEMENTED(load_f32_dst)
    NOT_IMPLEMENTED(store_f32)
    NOT_IMPLEMENTED(gather_f32)
    NOT_IMPLEMENTED(load_rgf32)
    NOT_IMPLEMENTED(store_rgf32)
    NOT_IMPLEMENTED(load_1010102)
    NOT_IMPLEMENTED(load_1010102_dst)
    NOT_IMPLEMENTED(store_1010102)
    NOT_IMPLEMENTED(gather_1010102)
    NOT_IMPLEMENTED(store_u16_be)
    NOT_IMPLEMENTED(byte_tables)  // TODO
    NOT_IMPLEMENTED(colorburn)
    NOT_IMPLEMENTED(colordodge)
    NOT_IMPLEMENTED(softlight)
    NOT_IMPLEMENTED(hue)
    NOT_IMPLEMENTED(saturation)
    NOT_IMPLEMENTED(color)
    NOT_IMPLEMENTED(luminosity)
    NOT_IMPLEMENTED(matrix_3x3)
    NOT_IMPLEMENTED(matrix_3x4)
    NOT_IMPLEMENTED(matrix_4x5)  // TODO
    NOT_IMPLEMENTED(matrix_4x3)  // TODO
    NOT_IMPLEMENTED(parametric)
    NOT_IMPLEMENTED(gamma_)
    NOT_IMPLEMENTED(PQish)
    NOT_IMPLEMENTED(HLGish)
    NOT_IMPLEMENTED(HLGinvish)
    NOT_IMPLEMENTED(rgb_to_hsl)
    NOT_IMPLEMENTED(hsl_to_rgb)
    NOT_IMPLEMENTED(gauss_a_to_rgba)  // TODO
    NOT_IMPLEMENTED(mirror_x)  // TODO
    NOT_IMPLEMENTED(repeat_x)  // TODO
    NOT_IMPLEMENTED(mirror_y)  // TODO
    NOT_IMPLEMENTED(repeat_y)  // TODO
    NOT_IMPLEMENTED(negate_x)
    NOT_IMPLEMENTED(bicubic)  // TODO if I can figure out negative weights
    NOT_IMPLEMENTED(bicubic_clamp_8888)
    NOT_IMPLEMENTED(bilinear_nx)  // TODO
    NOT_IMPLEMENTED(bilinear_ny)  // TODO
    NOT_IMPLEMENTED(bilinear_px)  // TODO
    NOT_IMPLEMENTED(bilinear_py)  // TODO
    NOT_IMPLEMENTED(bicubic_n3x)  // TODO
    NOT_IMPLEMENTED(bicubic_n1x)  // TODO
    NOT_IMPLEMENTED(bicubic_p1x)  // TODO
    NOT_IMPLEMENTED(bicubic_p3x)  // TODO
    NOT_IMPLEMENTED(bicubic_n3y)  // TODO
    NOT_IMPLEMENTED(bicubic_n1y)  // TODO
    NOT_IMPLEMENTED(bicubic_p1y)  // TODO
    NOT_IMPLEMENTED(bicubic_p3y)  // TODO
    NOT_IMPLEMENTED(save_xy)  // TODO
    NOT_IMPLEMENTED(accumulate)  // TODO
    NOT_IMPLEMENTED(xy_to_2pt_conical_well_behaved)
    NOT_IMPLEMENTED(xy_to_2pt_conical_strip)
    NOT_IMPLEMENTED(xy_to_2pt_conical_focal_on_circle)
    NOT_IMPLEMENTED(xy_to_2pt_conical_smaller)
    NOT_IMPLEMENTED(xy_to_2pt_conical_greater)
    NOT_IMPLEMENTED(alter_2pt_conical_compensate_focal)
    NOT_IMPLEMENTED(alter_2pt_conical_unswap)
    NOT_IMPLEMENTED(mask_2pt_conical_nan)
    NOT_IMPLEMENTED(mask_2pt_conical_degenerates)
    NOT_IMPLEMENTED(apply_vector_mask)
#undef NOT_IMPLEMENTED

#endif//defined(JUMPER_IS_SCALAR) controlling whether we build lowp stages
}  // namespace lowp

}  // namespace SK_OPTS_NS

#endif//SkRasterPipeline_opts_DEFINED