Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2016 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #ifndef SkRasterPipeline_opts_DEFINED |
| 9 | #define SkRasterPipeline_opts_DEFINED |
| 10 | |
Mike Klein | 1f49f26 | 2016-10-31 19:49:27 -0400 | [diff] [blame] | 11 | #include "SkColorPriv.h" |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 12 | #include "SkColorLookUpTable.h" |
Matt Sarett | db4d406 | 2016-11-16 16:07:15 -0500 | [diff] [blame] | 13 | #include "SkColorSpaceXform_A2B.h" |
| 14 | #include "SkColorSpaceXformPriv.h" |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 15 | #include "SkHalf.h" |
Mike Klein | 46e66a2 | 2016-11-21 16:19:34 -0500 | [diff] [blame] | 16 | #include "SkImageShaderContext.h" |
Mike Klein | a0c4c34 | 2016-11-29 13:58:49 -0500 | [diff] [blame] | 17 | #include "SkMSAN.h" |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 18 | #include "SkPM4f.h" |
mtklein | 125b2aa | 2016-11-04 13:41:34 -0700 | [diff] [blame] | 19 | #include "SkPM4fPriv.h" |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 20 | #include "SkRasterPipeline.h" |
| 21 | #include "SkSRGB.h" |
| 22 | |
namespace {

// SIMD width of the pipeline: 8 lanes under AVX2, otherwise 4.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    static constexpr int N = 8;
#else
    static constexpr int N = 4;
#endif

    // N-wide vectors of each scalar type the stages work with.
    using SkNf = SkNx<N, float>;
    using SkNi = SkNx<N, int32_t>;
    using SkNu = SkNx<N, uint32_t>;
    using SkNh = SkNx<N, uint16_t>;
    using SkNb = SkNx<N, uint8_t>;

    // The signature every pipeline stage shares: the packed x_tail counter, the
    // program pointer, then 8 channel registers (src r,g,b,a and dst dr,dg,db,da).
    using Fn = void(SK_VECTORCALL *)(size_t x_tail, void** p, SkNf,SkNf,SkNf,SkNf,
                                     SkNf,SkNf,SkNf,SkNf);
    // x_tail encodes two values x and tail as x*N+tail, where 0 <= tail < N.
    // x is the induction variable we're walking along, incrementing by N each step.
    // tail == 0 means work with a full N pixels; otherwise use only the low tail pixels.
    //
    // p is our program, a sequence of Fn to call interlaced with any void* context pointers. E.g.
    //    &load_8888
    //    (src ptr)
    //    &from_srgb
    //    &move_src_dst
    //    &load_f16
    //    (dst ptr)
    //    &swap
    //    &srcover
    //    &store_f16
    //    (dst ptr)
    //    &just_return

}  // namespace
Mike Klein | 2878e76 | 2016-10-19 21:05:17 -0400 | [diff] [blame] | 57 | |
#define SI static inline

// Basically, return *(*ptr)++, maybe faster than the compiler can do it.
SI void* load_and_increment(void*** ptr) {
    // We do this often enough that it's worth hyper-optimizing.
    // x86 can do this in one instruction if ptr is in rsi.
    // (This is why p is the second argument to Fn: it's passed in rsi.)
#if defined(__GNUC__) && defined(__x86_64__)
    // NOTE(review): lodsq loads from [rsi] but the asm lists no memory input or
    // "memory" clobber; this assumes the program array's contents are already
    // visible to the asm — TODO confirm the compiler can't reorder stores past it.
    void* rax;
    __asm__("lodsq" : "=a"(rax), "+S"(*ptr));
    return rax;
#else
    return *(*ptr)++;
#endif
}
| 73 | |
// Stages are logically a pipeline, and physically are contiguous in an array.
// To get to the next stage, we just increment our pointer to the next array element.
SI void SK_VECTORCALL next(size_t x_tail, void** p, SkNf r, SkNf g, SkNf b, SkNf a,
                           SkNf dr, SkNf dg, SkNf db, SkNf da) {
    // Pop the next stage's function pointer off the program and call it,
    // forwarding all eight channel registers unchanged.
    auto next = (Fn)load_and_increment(&p);
    next(x_tail,p, r,g,b,a, dr,dg,db,da);
}
| 81 | |
// Stages defined below always call next.
// This is always the last stage, a backstop that actually returns to the caller when done.
SI void SK_VECTORCALL just_return(size_t, void**, SkNf, SkNf, SkNf, SkNf,
                                  SkNf, SkNf, SkNf, SkNf) {}
| 86 | |
// STAGE(name) declares a context-free pipeline stage.  It expands to:
//   1) a forward declaration of name##_kernel(x, tail, channel refs...),
//   2) the pipeline-callable wrapper name(), which unpacks x_tail into
//      x = x_tail/N and tail = x_tail%N, runs the kernel, then calls next(),
//   3) the opening of name##_kernel's definition — the user supplies { body }.
#define STAGE(name)                                                                      \
    static SK_ALWAYS_INLINE void name##_kernel(size_t x, size_t tail,                    \
                                               SkNf& r, SkNf& g, SkNf& b, SkNf& a,       \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da);  \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                                  \
                               SkNf r, SkNf g, SkNf b, SkNf a,                           \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                     \
        name##_kernel(x_tail/N, x_tail%N, r,g,b,a, dr,dg,db,da);                         \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                            \
    }                                                                                    \
    static SK_ALWAYS_INLINE void name##_kernel(size_t x, size_t tail,                    \
                                               SkNf& r, SkNf& g, SkNf& b, SkNf& a,       \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 100 | |
// STAGE_CTX(name, Ctx) is like STAGE(name), but the wrapper additionally pops
// one context pointer off the program (via load_and_increment) and passes it to
// the kernel as `ctx`, cast to the given Ctx pointer type.
#define STAGE_CTX(name, Ctx)                                                             \
    static SK_ALWAYS_INLINE void name##_kernel(Ctx ctx, size_t x, size_t tail,           \
                                               SkNf& r, SkNf& g, SkNf& b, SkNf& a,       \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da);  \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                                  \
                               SkNf r, SkNf g, SkNf b, SkNf a,                           \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                     \
        auto ctx = (Ctx)load_and_increment(&p);                                          \
        name##_kernel(ctx, x_tail/N, x_tail%N, r,g,b,a, dr,dg,db,da);                    \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                            \
    }                                                                                    \
    static SK_ALWAYS_INLINE void name##_kernel(Ctx ctx, size_t x, size_t tail,           \
                                               SkNf& r, SkNf& g, SkNf& b, SkNf& a,       \
                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 115 | |
// Many xfermodes apply the same logic to each channel.
// RGBA_XFERMODE(name) defines a stage whose kernel(s, sa, d, da) is applied
// identically to r, g, b, and a (alpha uses itself as its own "alpha" inputs).
#define RGBA_XFERMODE(name)                                                       \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,     \
                                               const SkNf& d, const SkNf& da);    \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                           \
                               SkNf r, SkNf g, SkNf b, SkNf a,                    \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {              \
        r = name##_kernel(r,a,dr,da);                                             \
        g = name##_kernel(g,a,dg,da);                                             \
        b = name##_kernel(b,a,db,da);                                             \
        a = name##_kernel(a,a,da,da);                                             \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                     \
    }                                                                             \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,     \
                                               const SkNf& d, const SkNf& da)
Mike Klein | 9161ef0 | 2016-10-04 14:03:27 -0400 | [diff] [blame] | 131 | |
// Most of the rest apply the same logic to color channels and use srcover's alpha logic.
// RGB_XFERMODE(name): kernel(s, sa, d, da) is applied to r, g, b only;
// alpha always gets srcover:  a' = a + da*(1-a).
#define RGB_XFERMODE(name)                                                        \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,     \
                                               const SkNf& d, const SkNf& da);    \
    SI void SK_VECTORCALL name(size_t x_tail, void** p,                           \
                               SkNf r, SkNf g, SkNf b, SkNf a,                    \
                               SkNf dr, SkNf dg, SkNf db, SkNf da) {              \
        r = name##_kernel(r,a,dr,da);                                             \
        g = name##_kernel(g,a,dg,da);                                             \
        b = name##_kernel(b,a,db,da);                                             \
        a = a + (da * (1.0f-a));                                                  \
        next(x_tail,p, r,g,b,a, dr,dg,db,da);                                     \
    }                                                                             \
    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,     \
                                               const SkNf& d, const SkNf& da)
| 147 | |
// Load an N-wide vector from src.
// tail == 0 loads all N lanes; otherwise only the low `tail` lanes are read
// from src (the rest are zero), so we never touch memory past the row's end.
template <typename T>
SI SkNx<N,T> load(size_t tail, const T* src) {
    if (tail) {
        T buf[8] = {0};                 // Big enough for the widest N (8).
        switch (tail & (N-1)) {         // Deliberate fallthrough: copy lanes tail-1 .. 1.
            case 7: buf[6] = src[6];
            case 6: buf[5] = src[5];
            case 5: buf[4] = src[4];
            case 4: buf[3] = src[3];
            case 3: buf[2] = src[2];
            case 2: buf[1] = src[1];
        }
        buf[0] = src[0];                // tail >= 1 here, so lane 0 is always read.
        return SkNx<N,T>::Load(buf);
    }
    return SkNx<N,T>::Load(src);
}
// Gather N values src[offset[i]] into an N-wide vector.
// As with load(), tail != 0 reads only the low `tail` lanes (rest zeroed).
template <typename T>
SI SkNx<N,T> gather(size_t tail, const T* src, const SkNi& offset) {
    if (tail) {
        T buf[8] = {0};
        switch (tail & (N-1)) {         // Deliberate fallthrough, as in load().
            case 7: buf[6] = src[offset[6]];
            case 6: buf[5] = src[offset[5]];
            case 5: buf[4] = src[offset[4]];
            case 4: buf[3] = src[offset[3]];
            case 3: buf[2] = src[offset[2]];
            case 2: buf[1] = src[offset[1]];
        }
        buf[0] = src[offset[0]];
        return SkNx<N,T>::Load(buf);
    }
    // Full gather: stage all N lanes through a buffer, then vector-load it.
    T buf[8];
    for (size_t i = 0; i < N; i++) {
        buf[i] = src[offset[i]];
    }
    return SkNx<N,T>::Load(buf);
}
// Store an N-wide vector to dst.
// tail == 0 stores all N lanes; otherwise only the low `tail` lanes are
// written, so we never scribble past the row's end.
template <typename T>
SI void store(size_t tail, const SkNx<N,T>& v, T* dst) {
    if (tail) {
        switch (tail & (N-1)) {         // Deliberate fallthrough: write lanes tail-1 .. 1.
            case 7: dst[6] = v[6];
            case 6: dst[5] = v[5];
            case 5: dst[4] = v[4];
            case 4: dst[3] = v[3];
            case 3: dst[2] = v[2];
            case 2: dst[1] = v[1];
        }
        dst[0] = v[0];                  // tail >= 1, so lane 0 is always written.
        return;
    }
    v.store(dst);
}
| 202 | |
#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    // AVX2 overloads for 32-bit lanes: use masked intrinsics instead of the
    // scalar tail loops above.

    // A per-lane ~0/0 mask selecting the low `tail` lanes; tail == 0 selects all 8.
    SI __m256i mask(size_t tail) {
        static const int masks[][8] = {
            {~0,~0,~0,~0, ~0,~0,~0,~0 },  // remember, tail == 0 ~~> load all N
            {~0, 0, 0, 0,  0, 0, 0, 0 },
            {~0,~0, 0, 0,  0, 0, 0, 0 },
            {~0,~0,~0, 0,  0, 0, 0, 0 },
            {~0,~0,~0,~0,  0, 0, 0, 0 },
            {~0,~0,~0,~0, ~0, 0, 0, 0 },
            {~0,~0,~0,~0, ~0,~0, 0, 0 },
            {~0,~0,~0,~0, ~0,~0,~0, 0 },
        };
        return SkNi::Load(masks + tail).fVec;
    }

    // Masked loads: lanes beyond `tail` read as zero, without touching memory.
    SI SkNi load(size_t tail, const int32_t* src) {
        return tail ? _mm256_maskload_epi32((const int*)src, mask(tail))
                    : SkNi::Load(src);
    }
    SI SkNu load(size_t tail, const uint32_t* src) {
        return tail ? _mm256_maskload_epi32((const int*)src, mask(tail))
                    : SkNu::Load(src);
    }
    SI SkNf load(size_t tail, const float* src) {
        return tail ? _mm256_maskload_ps((const float*)src, mask(tail))
                    : SkNf::Load(src);
    }

    // Masked gathers: unselected lanes take the 0 from the src operand.
    SI SkNi gather(size_t tail, const int32_t* src, const SkNi& offset) {
        auto m = mask(tail);
        return _mm256_mask_i32gather_epi32(SkNi(0).fVec, (const int*)src, offset.fVec, m, 4);
    }
    SI SkNu gather(size_t tail, const uint32_t* src, const SkNi& offset) {
        auto m = mask(tail);
        return _mm256_mask_i32gather_epi32(SkNi(0).fVec, (const int*)src, offset.fVec, m, 4);
    }
    SI SkNf gather(size_t tail, const float* src, const SkNi& offset) {
        auto m = _mm256_castsi256_ps(mask(tail));
        return _mm256_mask_i32gather_ps(SkNf(0).fVec, (const float*)src, offset.fVec, m, 4);
    }

    static const char* bug = "I don't think MSAN understands maskstore.";

    // Masked stores, with an explicit hint so MSAN knows dst[0..tail) is now initialized.
    SI void store(size_t tail, const SkNi& v, int32_t* dst) {
        if (tail) {
            _mm256_maskstore_epi32((int*)dst, mask(tail), v.fVec);
            return sk_msan_mark_initialized(dst, dst+tail, bug);
        }
        v.store(dst);
    }
    SI void store(size_t tail, const SkNu& v, uint32_t* dst) {
        if (tail) {
            _mm256_maskstore_epi32((int*)dst, mask(tail), v.fVec);
            return sk_msan_mark_initialized(dst, dst+tail, bug);
        }
        v.store(dst);
    }
    SI void store(size_t tail, const SkNf& v, float* dst) {
        if (tail) {
            _mm256_maskstore_ps((float*)dst, mask(tail), v.fVec);
            return sk_msan_mark_initialized(dst, dst+tail, bug);
        }
        v.store(dst);
    }
#endif
| 267 | |
// Fused multiply-add: f*m + a.
SI SkNf SkNf_fma(const SkNf& f, const SkNf& m, const SkNf& a) { return SkNx_fma(f,m,a); }

// Round x*scale to the nearest integer (for non-negative inputs).
SI SkNi SkNf_round(const SkNf& x, const SkNf& scale) {
    // Every time I try, _mm_cvtps_epi32 benches as slower than using FMA and _mm_cvttps_epi32. :/
    return SkNx_cast<int>(SkNf_fma(x,scale, 0.5f));
}

// Convert byte values 0..255 (held in int lanes) to floats in [0,1].
SI SkNf SkNf_from_byte(const SkNi& x) {
    // Same trick as in store_8888: 0x470000BB == 32768.0f + BB/256.0f for all bytes BB.
    auto v = 0x47000000 | x;
    // Read this as (pun_float(v) - 32768.0f) * (256/255.0f), redistributed to be an FMA.
    return SkNf_fma(SkNf::Load(&v), 256/255.0f, -32768*256/255.0f);
}
SI SkNf SkNf_from_byte(const SkNu& x) { return SkNf_from_byte(SkNi::Load(&x)); }
SI SkNf SkNf_from_byte(const SkNb& x) { return SkNf_from_byte(SkNx_cast<int>(x)); }
| 283 | |
// Unpack 8888 pixels (8 bits per channel, r in the low byte) to floats in [0,1].
SI void from_8888(const SkNu& _8888, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
    *r = SkNf_from_byte((_8888      ) & 0xff);
    *g = SkNf_from_byte((_8888 >>  8) & 0xff);
    *b = SkNf_from_byte((_8888 >> 16) & 0xff);
    *a = SkNf_from_byte((_8888 >> 24)       );
}
// Unpack 4444 pixels to floats in [0,1], using each channel's configured shift.
SI void from_4444(const SkNh& _4444, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
    auto _32_bit = SkNx_cast<int>(_4444);

    *r = SkNx_cast<float>(_32_bit & (0xF << SK_R4444_SHIFT)) * (1.0f / (0xF << SK_R4444_SHIFT));
    *g = SkNx_cast<float>(_32_bit & (0xF << SK_G4444_SHIFT)) * (1.0f / (0xF << SK_G4444_SHIFT));
    *b = SkNx_cast<float>(_32_bit & (0xF << SK_B4444_SHIFT)) * (1.0f / (0xF << SK_B4444_SHIFT));
    *a = SkNx_cast<float>(_32_bit & (0xF << SK_A4444_SHIFT)) * (1.0f / (0xF << SK_A4444_SHIFT));
}
// Unpack 565 pixels to r,g,b floats in [0,1] (no alpha in this format).
SI void from_565(const SkNh& _565, SkNf* r, SkNf* g, SkNf* b) {
    auto _32_bit = SkNx_cast<int>(_565);

    *r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
    *g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
    *b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
}
// Unpack interleaved f16 (half-float) pixels at px into four float vectors.
SI void from_f16(const void* px, SkNf* r, SkNf* g, SkNf* b, SkNf* a) {
    SkNh rh, gh, bh, ah;
    SkNh::Load4(px, &rh, &gh, &bh, &ah);

    *r = SkHalfToFloat_finite_ftz(rh);
    *g = SkHalfToFloat_finite_ftz(gh);
    *b = SkHalfToFloat_finite_ftz(bh);
    *a = SkHalfToFloat_finite_ftz(ah);
}
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 314 | |
// Debugging stage: print the context string, leaving all registers untouched.
STAGE_CTX(trace, const char*) {
    SkDebugf("%s\n", ctx);
}
// Debugging stage: dump all eight channel registers, lane by lane.
STAGE(registers) {
    auto print = [](const char* name, const SkNf& v) {
        SkDebugf("%s:", name);
        for (int i = 0; i < N; i++) {
            SkDebugf(" %g", v[i]);
        }
        SkDebugf("\n");
    };
    print(" r", r);
    print(" g", g);
    print(" b", b);
    print(" a", a);
    print("dr", dr);
    print("dg", dg);
    print("db", db);
    print("da", da);
}
| 335 | |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 336 | STAGE(clamp_0) { |
Mike Klein | 130863e | 2016-10-27 11:29:36 -0400 | [diff] [blame] | 337 | a = SkNf::Max(a, 0.0f); |
| 338 | r = SkNf::Max(r, 0.0f); |
| 339 | g = SkNf::Max(g, 0.0f); |
| 340 | b = SkNf::Max(b, 0.0f); |
| 341 | } |
Mike Klein | d37d5d9 | 2016-12-14 13:38:24 +0000 | [diff] [blame] | 342 | STAGE(clamp_1) { |
| 343 | a = SkNf::Min(a, 1.0f); |
| 344 | r = SkNf::Min(r, 1.0f); |
| 345 | g = SkNf::Min(g, 1.0f); |
| 346 | b = SkNf::Min(b, 1.0f); |
| 347 | } |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 348 | STAGE(clamp_a) { |
Mike Klein | 130863e | 2016-10-27 11:29:36 -0400 | [diff] [blame] | 349 | a = SkNf::Min(a, 1.0f); |
| 350 | r = SkNf::Min(r, a); |
| 351 | g = SkNf::Min(g, a); |
| 352 | b = SkNf::Min(b, a); |
| 353 | } |
Matt Sarett | db4d406 | 2016-11-16 16:07:15 -0500 | [diff] [blame] | 354 | |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 355 | STAGE(unpremul) { |
Mike Klein | 5a13011 | 2016-11-28 09:48:31 -0500 | [diff] [blame] | 356 | auto scale = (a == 0.0f).thenElse(0.0f, 1.0f/a); |
| 357 | r *= scale; |
| 358 | g *= scale; |
| 359 | b *= scale; |
Mike Klein | eea7c16 | 2016-11-03 10:20:35 -0400 | [diff] [blame] | 360 | } |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 361 | STAGE(premul) { |
Mike Klein | eea7c16 | 2016-11-03 10:20:35 -0400 | [diff] [blame] | 362 | r *= a; |
| 363 | g *= a; |
| 364 | b *= a; |
| 365 | } |
| 366 | |
// Overwrite r,g,b from a 3-float context array; alpha is left untouched.
STAGE_CTX(set_rgb, const float*) {
    r = ctx[0];
    g = ctx[1];
    b = ctx[2];
}
// Exchange the red and blue channels (e.g. RGBA <-> BGRA ordering).
STAGE(swap_rb) { SkTSwap(r,b); }
Mike Klein | 7a14734d | 2016-11-29 15:33:39 -0500 | [diff] [blame] | 373 | |
// Copy the src registers into the dst registers.
STAGE(move_src_dst) {
    dr = r;
    dg = g;
    db = b;
    da = a;
}
// Copy the dst registers into the src registers.
STAGE(move_dst_src) {
    r = dr;
    g = dg;
    b = db;
    a = da;
}
// Exchange the src and dst registers.
STAGE(swap) {
    SkTSwap(r,dr);
    SkTSwap(g,dg);
    SkTSwap(b,db);
    SkTSwap(a,da);
}
Mike Klein | d5de013 | 2016-11-28 09:33:02 -0500 | [diff] [blame] | 392 | |
// Convert r,g,b from sRGB encoding to linear; alpha is already linear.
STAGE(from_srgb) {
    r = sk_linear_from_srgb_math(r);
    g = sk_linear_from_srgb_math(g);
    b = sk_linear_from_srgb_math(b);
}
// Convert r,g,b from linear to sRGB encoding.
// NOTE(review): per its name, sk_linear_to_srgb_needs_round appears to leave a
// final rounding step to the storing stage — confirm against SkSRGB.h.
STAGE(to_srgb) {
    r = sk_linear_to_srgb_needs_round(r);
    g = sk_linear_to_srgb_needs_round(g);
    b = sk_linear_to_srgb_needs_round(b);
}
Mike Klein | d5de013 | 2016-11-28 09:33:02 -0500 | [diff] [blame] | 403 | |
// Apply a gamma-2.2 transfer curve (decode): approximately x^2.2 per color channel.
STAGE(from_2dot2) {
    auto from_2dot2 = [](const SkNf& x) {
        // x^(141/64) = x^(2.20312) is a great approximation of the true value, x^(2.2).
        // (note: x^(35/16) = x^(2.1875) is an okay one as well and would be quicker)
        auto x16 = x.rsqrt().rsqrt().rsqrt().rsqrt();    // x^(1/16) = x^(4/64);
        auto x64 = x16.rsqrt().rsqrt();                  // x^(1/64)

        // x^(141/64) = x^(128/64) * x^(12/64) * x^(1/64)
        // Max() clamps negatives and NaN (from rsqrt of non-positive inputs) to 0.
        return SkNf::Max((x*x) * (x16*x16*x16) * (x64), 0.0f);
    };

    r = from_2dot2(r);
    g = from_2dot2(g);
    b = from_2dot2(b);
}
// Apply the inverse curve (encode): approximately x^(1/2.2) per color channel.
STAGE(to_2dot2) {
    auto to_2dot2 = [](const SkNf& x) {
        // x^(29/64) is a very good approximation of the true value, x^(1/2.2).
        auto x2  = x.rsqrt(),                            // x^(-1/2)
             x32 = x2.rsqrt().rsqrt().rsqrt().rsqrt(),   // x^(-1/32)
             x64 = x32.rsqrt();                          // x^(+1/64)

        // 29 = 32 - 2 - 1
        return SkNf::Max(x2.invert() * x32 * x64.invert(), 0.0f); // Watch out for NaN.
    };

    r = to_2dot2(r);
    g = to_2dot2(g);
    b = to_2dot2(b);
}
| 434 | |
// The default shader produces a constant color (from the SkPaint).
STAGE_CTX(constant_color, const SkPM4f*) {
    r = ctx->r();
    g = ctx->g();
    b = ctx->b();
    a = ctx->a();
}

// Set up registers with values relevant to shaders.
STAGE_CTX(seed_shader, const int*) {
    int y = *ctx;   // The row being shaded.

    // Only the low N entries of dx are loaded (N <= 8).
    static const float dx[] = { 0,1,2,3,4,5,6,7 };
    r = x + 0.5f + SkNf::Load(dx);   // dst pixel center x coordinates
    g = y + 0.5f;                    // dst pixel center y coordinate(s)
    b = 1.0f;
    a = 0.0f;
    dr = dg = db = da = 0.0f;
}
| 454 | |
// s' = sc for a scalar c.
STAGE_CTX(scale_1_float, const float*) {
    SkNf c = *ctx;

    r *= c;
    g *= c;
    b *= c;
    a *= c;
}
// s' = sc for 8-bit c (per-pixel coverage loaded from a byte buffer).
STAGE_CTX(scale_u8, const uint8_t**) {
    auto ptr = *ctx + x;
    SkNf c = SkNf_from_byte(load(tail, ptr));

    r = r*c;
    g = g*c;
    b = b*c;
    a = a*c;
}
| 474 | |
// Linear interpolation from `from` toward `to` by coverage `cov`,
// written as a single FMA: from + (to - from)*cov.
SI SkNf lerp(const SkNf& from, const SkNf& to, const SkNf& cov) {
    return SkNf_fma(to-from, cov, from);
}

// s' = d(1-c) + sc, for a scalar c.
STAGE_CTX(lerp_1_float, const float*) {
    SkNf c = *ctx;

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}

// s' = d(1-c) + sc for 8-bit c.
STAGE_CTX(lerp_u8, const uint8_t**) {
    auto ptr = *ctx + x;
    SkNf c = SkNf_from_byte(load(tail, ptr));

    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}

// s' = d(1-c) + sc for 565 c (per-channel coverage); alpha is forced to 1.
STAGE_CTX(lerp_565, const uint16_t**) {
    auto ptr = *ctx + x;
    SkNf cr, cg, cb;
    from_565(load(tail, ptr), &cr, &cg, &cb);

    r = lerp(dr, r, cr);
    g = lerp(dg, g, cg);
    b = lerp(db, b, cb);
    a = 1.0f;
}
| 511 | |
// Load alpha-only (A8) pixels: color channels become 0, alpha in [0,1].
STAGE_CTX(load_a8, const uint8_t**) {
    auto ptr = *ctx + x;
    r = g = b = 0.0f;
    a = SkNf_from_byte(load(tail, ptr));
}
| 517 | STAGE_CTX(store_a8, uint8_t**) { |
| 518 | auto ptr = *ctx + x; |
| 519 | store(tail, SkNx_cast<uint8_t>(SkNf_round(255.0f, a)), ptr); |
| 520 | } |
| 521 | |
// Load 565 pixels; the format carries no alpha, so a is set to 1.
STAGE_CTX(load_565, const uint16_t**) {
    auto ptr = *ctx + x;
    from_565(load(tail, ptr), &r,&g,&b);
    a = 1.0f;
}
// Store r,g,b as 565: round each channel to its field width and pack.
STAGE_CTX(store_565, uint16_t**) {
    auto ptr = *ctx + x;
    store(tail, SkNx_cast<uint16_t>( SkNf_round(r, SK_R16_MASK) << SK_R16_SHIFT
                                   | SkNf_round(g, SK_G16_MASK) << SK_G16_SHIFT
                                   | SkNf_round(b, SK_B16_MASK) << SK_B16_SHIFT), ptr);
}
| 533 | |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 534 | |
// Load f16 (half-float) pixels.  For a partial load, stage through a local
// vector so Load4 never reads past the row's end.
STAGE_CTX(load_f16, const uint64_t**) {
    auto ptr = *ctx + x;

    const void* src = ptr;
    SkNx<N, uint64_t> px;
    if (tail) {
        px  = load(tail, ptr);
        src = &px;
    }
    from_f16(src, &r, &g, &b, &a);
}
// Store f16 pixels.  For a partial store, Store4 writes into a local vector
// first, then only the low `tail` pixels are copied out.
STAGE_CTX(store_f16, uint64_t**) {
    auto ptr = *ctx + x;

    SkNx<N, uint64_t> px;
    SkNh::Store4(tail ? (void*)&px : (void*)ptr, SkFloatToHalf_finite_ftz(r),
                                                 SkFloatToHalf_finite_ftz(g),
                                                 SkFloatToHalf_finite_ftz(b),
                                                 SkFloatToHalf_finite_ftz(a));
    if (tail) {
        store(tail, px, ptr);
    }
}
| 558 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 559 | STAGE_CTX(store_f32, SkPM4f**) { |
| 560 | auto ptr = *ctx + x; |
mtklein | a4a4488 | 2016-11-04 13:20:07 -0700 | [diff] [blame] | 561 | |
Mike Klein | 4958006 | 2016-12-11 11:42:07 -0500 | [diff] [blame] | 562 | SkNx<N, SkPM4f> px; |
| 563 | SkNf::Store4(tail ? (void*)&px : (void*)ptr, r,g,b,a); |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 564 | if (tail) { |
Mike Klein | 4958006 | 2016-12-11 11:42:07 -0500 | [diff] [blame] | 565 | store(tail, px, ptr); |
mtklein | a4a4488 | 2016-11-04 13:20:07 -0700 | [diff] [blame] | 566 | } |
| 567 | } |
| 568 | |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 569 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 570 | STAGE_CTX(load_8888, const uint32_t**) { |
| 571 | auto ptr = *ctx + x; |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 572 | from_8888(load(tail, ptr), &r, &g, &b, &a); |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 573 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 574 | STAGE_CTX(store_8888, uint32_t**) { |
Mike Klein | 3e05671 | 2016-12-02 14:22:57 -0500 | [diff] [blame] | 575 | auto byte = [](const SkNf& x, int ix) { |
| 576 | // Here's a neat trick: 0x47000000 == 32768.0f, and 0x470000ff == 32768.0f + (255/256.0f). |
| 577 | auto v = SkNf_fma(255/256.0f, x, 32768.0f); |
| 578 | switch (ix) { |
| 579 | case 0: return SkNi::Load(&v) & 0xff; // R |
| 580 | case 3: return SkNi::Load(&v) << 24; // A |
| 581 | } |
| 582 | return (SkNi::Load(&v) & 0xff) << (8*ix); // B or G |
| 583 | }; |
| 584 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 585 | auto ptr = *ctx + x; |
Mike Klein | 3e05671 | 2016-12-02 14:22:57 -0500 | [diff] [blame] | 586 | store(tail, byte(r,0)|byte(g,1)|byte(b,2)|byte(a,3), (int*)ptr); |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 587 | } |
| 588 | |
Matt Sarett | 379938e | 2017-01-12 18:34:29 -0500 | [diff] [blame] | 589 | STAGE_CTX(load_u16_be, const uint64_t**) { |
| 590 | auto ptr = *ctx + x; |
| 591 | const void* src = ptr; |
| 592 | SkNx<N, uint64_t> px; |
| 593 | if (tail) { |
| 594 | px = load(tail, ptr); |
| 595 | src = &px; |
| 596 | } |
| 597 | |
| 598 | SkNh rh, gh, bh, ah; |
| 599 | SkNh::Load4(src, &rh, &gh, &bh, &ah); |
| 600 | r = (1.0f / 65535.0f) * SkNx_cast<float>((rh << 8) | (rh >> 8)); |
| 601 | g = (1.0f / 65535.0f) * SkNx_cast<float>((gh << 8) | (gh >> 8)); |
| 602 | b = (1.0f / 65535.0f) * SkNx_cast<float>((bh << 8) | (bh >> 8)); |
| 603 | a = (1.0f / 65535.0f) * SkNx_cast<float>((ah << 8) | (ah >> 8)); |
| 604 | } |
| 605 | |
Matt Sarett | 5bee0b6 | 2017-01-19 12:04:32 -0500 | [diff] [blame] | 606 | STAGE_CTX(load_rgb_u16_be, const uint16_t**) { |
| 607 | auto ptr = *ctx + 3*x; |
| 608 | const void* src = ptr; |
| 609 | uint16_t buf[N*3] = {0}; |
| 610 | if (tail) { |
| 611 | memcpy(buf, src, tail*3*sizeof(uint16_t)); |
| 612 | src = buf; |
| 613 | } |
| 614 | |
| 615 | SkNh rh, gh, bh; |
| 616 | SkNh::Load3(src, &rh, &gh, &bh); |
| 617 | r = (1.0f / 65535.0f) * SkNx_cast<float>((rh << 8) | (rh >> 8)); |
| 618 | g = (1.0f / 65535.0f) * SkNx_cast<float>((gh << 8) | (gh >> 8)); |
| 619 | b = (1.0f / 65535.0f) * SkNx_cast<float>((bh << 8) | (bh >> 8)); |
| 620 | a = 1.0f; |
| 621 | } |
| 622 | |
Matt Sarett | 1da27ef | 2017-01-19 17:14:07 -0500 | [diff] [blame] | 623 | STAGE_CTX(store_u16_be, uint64_t**) { |
| 624 | auto to_u16_be = [](const SkNf& x) { |
| 625 | SkNh x16 = SkNx_cast<uint16_t>(65535.0f * x); |
| 626 | return (x16 << 8) | (x16 >> 8); |
| 627 | }; |
| 628 | |
| 629 | auto ptr = *ctx + x; |
| 630 | SkNx<N, uint64_t> px; |
| 631 | SkNh::Store4(tail ? (void*)&px : (void*)ptr, to_u16_be(r), |
| 632 | to_u16_be(g), |
| 633 | to_u16_be(b), |
| 634 | to_u16_be(a)); |
| 635 | if (tail) { |
| 636 | store(tail, px, ptr); |
| 637 | } |
| 638 | } |
| 639 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 640 | STAGE_CTX(load_tables, const LoadTablesContext*) { |
Matt Sarett | 379938e | 2017-01-12 18:34:29 -0500 | [diff] [blame] | 641 | auto ptr = (const uint32_t*)ctx->fSrc + x; |
Matt Sarett | f6878ba | 2016-12-01 14:46:12 -0500 | [diff] [blame] | 642 | |
| 643 | SkNu rgba = load(tail, ptr); |
| 644 | auto to_int = [](const SkNu& v) { return SkNi::Load(&v); }; |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 645 | r = gather(tail, ctx->fR, to_int((rgba >> 0) & 0xff)); |
| 646 | g = gather(tail, ctx->fG, to_int((rgba >> 8) & 0xff)); |
| 647 | b = gather(tail, ctx->fB, to_int((rgba >> 16) & 0xff)); |
Mike Klein | e2e2ae2 | 2016-12-02 15:21:03 -0500 | [diff] [blame] | 648 | a = SkNf_from_byte(rgba >> 24); |
Matt Sarett | f6878ba | 2016-12-01 14:46:12 -0500 | [diff] [blame] | 649 | } |
| 650 | |
Matt Sarett | 379938e | 2017-01-12 18:34:29 -0500 | [diff] [blame] | 651 | STAGE_CTX(load_tables_u16_be, const LoadTablesContext*) { |
| 652 | auto ptr = (const uint64_t*)ctx->fSrc + x; |
Matt Sarett | c55bc9a | 2017-01-13 13:58:57 -0500 | [diff] [blame] | 653 | const void* src = ptr; |
| 654 | SkNx<N, uint64_t> px; |
| 655 | if (tail) { |
| 656 | px = load(tail, ptr); |
| 657 | src = &px; |
| 658 | } |
Matt Sarett | 379938e | 2017-01-12 18:34:29 -0500 | [diff] [blame] | 659 | |
| 660 | SkNh rh, gh, bh, ah; |
Matt Sarett | c55bc9a | 2017-01-13 13:58:57 -0500 | [diff] [blame] | 661 | SkNh::Load4(src, &rh, &gh, &bh, &ah); |
Matt Sarett | 379938e | 2017-01-12 18:34:29 -0500 | [diff] [blame] | 662 | |
| 663 | // ctx->fSrc is big-endian, so "& 0xff" grabs the 8 most significant bits of each component. |
| 664 | r = gather(tail, ctx->fR, SkNx_cast<int>(rh & 0xff)); |
| 665 | g = gather(tail, ctx->fG, SkNx_cast<int>(gh & 0xff)); |
| 666 | b = gather(tail, ctx->fB, SkNx_cast<int>(bh & 0xff)); |
| 667 | a = (1.0f / 65535.0f) * SkNx_cast<float>((ah << 8) | (ah >> 8)); |
| 668 | } |
| 669 | |
Matt Sarett | 5bee0b6 | 2017-01-19 12:04:32 -0500 | [diff] [blame] | 670 | STAGE_CTX(load_tables_rgb_u16_be, const LoadTablesContext*) { |
| 671 | auto ptr = (const uint16_t*)ctx->fSrc + 3*x; |
| 672 | const void* src = ptr; |
| 673 | uint16_t buf[N*3] = {0}; |
| 674 | if (tail) { |
| 675 | memcpy(buf, src, tail*3*sizeof(uint16_t)); |
| 676 | src = buf; |
| 677 | } |
| 678 | |
| 679 | SkNh rh, gh, bh; |
| 680 | SkNh::Load3(src, &rh, &gh, &bh); |
| 681 | |
| 682 | // ctx->fSrc is big-endian, so "& 0xff" grabs the 8 most significant bits of each component. |
| 683 | r = gather(tail, ctx->fR, SkNx_cast<int>(rh & 0xff)); |
| 684 | g = gather(tail, ctx->fG, SkNx_cast<int>(gh & 0xff)); |
| 685 | b = gather(tail, ctx->fB, SkNx_cast<int>(bh & 0xff)); |
| 686 | a = 1.0f; |
| 687 | } |
| 688 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 689 | STAGE_CTX(store_tables, const StoreTablesContext*) { |
| 690 | auto ptr = ctx->fDst + x; |
Matt Sarett | f6878ba | 2016-12-01 14:46:12 -0500 | [diff] [blame] | 691 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 692 | float scale = ctx->fCount - 1; |
Mike Klein | 9c77ea1 | 2016-12-02 08:29:10 -0500 | [diff] [blame] | 693 | SkNi ri = SkNf_round(scale, r); |
| 694 | SkNi gi = SkNf_round(scale, g); |
| 695 | SkNi bi = SkNf_round(scale, b); |
Matt Sarett | f6878ba | 2016-12-01 14:46:12 -0500 | [diff] [blame] | 696 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 697 | store(tail, ( SkNx_cast<int>(gather(tail, ctx->fR, ri)) << 0 |
| 698 | | SkNx_cast<int>(gather(tail, ctx->fG, gi)) << 8 |
| 699 | | SkNx_cast<int>(gather(tail, ctx->fB, bi)) << 16 |
| 700 | | SkNf_round(255.0f, a) << 24), (int*)ptr); |
Matt Sarett | f6878ba | 2016-12-01 14:46:12 -0500 | [diff] [blame] | 701 | } |
| 702 | |
Mike Klein | 729b582 | 2016-11-28 18:23:23 -0500 | [diff] [blame] | 703 | SI SkNf inv(const SkNf& x) { return 1.0f - x; } |
| 704 | |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 705 | RGBA_XFERMODE(clear) { return 0.0f; } |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 706 | RGBA_XFERMODE(srcatop) { return s*da + d*inv(sa); } |
| 707 | RGBA_XFERMODE(srcin) { return s * da; } |
| 708 | RGBA_XFERMODE(srcout) { return s * inv(da); } |
Mike Klein | 87185f7 | 2016-12-01 18:22:26 -0500 | [diff] [blame] | 709 | RGBA_XFERMODE(srcover) { return SkNf_fma(d, inv(sa), s); } |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 710 | RGBA_XFERMODE(dstatop) { return srcatop_kernel(d,da,s,sa); } |
| 711 | RGBA_XFERMODE(dstin) { return srcin_kernel (d,da,s,sa); } |
| 712 | RGBA_XFERMODE(dstout) { return srcout_kernel (d,da,s,sa); } |
| 713 | RGBA_XFERMODE(dstover) { return srcover_kernel(d,da,s,sa); } |
| 714 | |
| 715 | RGBA_XFERMODE(modulate) { return s*d; } |
| 716 | RGBA_XFERMODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; } |
| 717 | RGBA_XFERMODE(plus_) { return s + d; } |
| 718 | RGBA_XFERMODE(screen) { return s + d - s*d; } |
| 719 | RGBA_XFERMODE(xor_) { return s*inv(da) + d*inv(sa); } |
| 720 | |
// Blend modes that apply only to color channels; alpha uses srcover.
// All inputs are premultiplied, so d/da and s/sa recover unpremultiplied values.
RGB_XFERMODE(colorburn) {
    return (d == da ).thenElse(d + s*inv(da),
           (s == 0.0f).thenElse(s + d*inv(sa),
                                sa*(da - SkNf::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
}
RGB_XFERMODE(colordodge) {
    return (d == 0.0f).thenElse(d + s*inv(da),
           (s == sa ).thenElse(s + d*inv(sa),
                               sa*SkNf::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
}
RGB_XFERMODE(darken) { return s + d - SkNf::Max(s*da, d*sa); }
RGB_XFERMODE(difference) { return s + d - 2.0f*SkNf::Min(s*da,d*sa); }
RGB_XFERMODE(exclusion) { return s + d - 2.0f*s*d; }
RGB_XFERMODE(hardlight) {
    return s*inv(da) + d*inv(sa)
         + (2.0f*s <= sa).thenElse(2.0f*s*d, sa*da - 2.0f*(da-d)*(sa-s));
}
RGB_XFERMODE(lighten) { return s + d - SkNf::Min(s*da, d*sa); }
// overlay is hardlight with source and destination swapped.
RGB_XFERMODE(overlay) { return hardlight_kernel(d,da,s,sa); }
RGB_XFERMODE(softlight) {
    SkNf m = (da > 0.0f).thenElse(d / da, 0.0f),   // unpremultiplied dst (0 when da == 0)
         s2 = 2.0f*s,
         m4 = 4.0f*m;

    // The logic forks three ways:
    //    1. dark src?
    //    2. light src, dark dst?
    //    3. light src, light dst?
    SkNf darkSrc = d*(sa + (s2 - sa)*(1.0f - m)),      // Used in case 1.
         darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m,   // Used in case 2.
         liteDst = m.rsqrt().invert() - m,             // Used in case 3 (== sqrt(m) - m).
         liteSrc = d*sa + da*(s2 - sa) * (4.0f*d <= da).thenElse(darkDst, liteDst);  // 2 or 3?
    return s*inv(da) + d*inv(sa) + (s2 <= sa).thenElse(darkSrc, liteSrc);            // 1 or (2 or 3)?
}
| 755 | |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 756 | STAGE(luminance_to_alpha) { |
Mike Klein | 1f49f26 | 2016-10-31 19:49:27 -0400 | [diff] [blame] | 757 | a = SK_LUM_COEFF_R*r + SK_LUM_COEFF_G*g + SK_LUM_COEFF_B*b; |
| 758 | r = g = b = 0; |
| 759 | } |
| 760 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 761 | STAGE_CTX(matrix_2x3, const float*) { |
| 762 | auto m = ctx; |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 763 | |
Mike Klein | 87185f7 | 2016-12-01 18:22:26 -0500 | [diff] [blame] | 764 | auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[2], m[4])), |
| 765 | G = SkNf_fma(r,m[1], SkNf_fma(g,m[3], m[5])); |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 766 | r = R; |
| 767 | g = G; |
| 768 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 769 | STAGE_CTX(matrix_3x4, const float*) { |
| 770 | auto m = ctx; |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 771 | |
Mike Klein | 87185f7 | 2016-12-01 18:22:26 -0500 | [diff] [blame] | 772 | auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[3], SkNf_fma(b,m[6], m[ 9]))), |
| 773 | G = SkNf_fma(r,m[1], SkNf_fma(g,m[4], SkNf_fma(b,m[7], m[10]))), |
| 774 | B = SkNf_fma(r,m[2], SkNf_fma(g,m[5], SkNf_fma(b,m[8], m[11]))); |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 775 | r = R; |
| 776 | g = G; |
| 777 | b = B; |
| 778 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 779 | STAGE_CTX(matrix_4x5, const float*) { |
| 780 | auto m = ctx; |
Mike Klein | eea7c16 | 2016-11-03 10:20:35 -0400 | [diff] [blame] | 781 | |
Mike Klein | 87185f7 | 2016-12-01 18:22:26 -0500 | [diff] [blame] | 782 | auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[4], SkNf_fma(b,m[ 8], SkNf_fma(a,m[12], m[16])))), |
| 783 | G = SkNf_fma(r,m[1], SkNf_fma(g,m[5], SkNf_fma(b,m[ 9], SkNf_fma(a,m[13], m[17])))), |
| 784 | B = SkNf_fma(r,m[2], SkNf_fma(g,m[6], SkNf_fma(b,m[10], SkNf_fma(a,m[14], m[18])))), |
| 785 | A = SkNf_fma(r,m[3], SkNf_fma(g,m[7], SkNf_fma(b,m[11], SkNf_fma(a,m[15], m[19])))); |
Mike Klein | eea7c16 | 2016-11-03 10:20:35 -0400 | [diff] [blame] | 786 | r = R; |
| 787 | g = G; |
| 788 | b = B; |
| 789 | a = A; |
| 790 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 791 | STAGE_CTX(matrix_perspective, const float*) { |
Mike Klein | c01e7df | 2016-11-17 16:27:10 -0500 | [diff] [blame] | 792 | // N.B. unlike the matrix_NxM stages, this takes a row-major matrix. |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 793 | auto m = ctx; |
Mike Klein | c01e7df | 2016-11-17 16:27:10 -0500 | [diff] [blame] | 794 | |
Mike Klein | 87185f7 | 2016-12-01 18:22:26 -0500 | [diff] [blame] | 795 | auto R = SkNf_fma(r,m[0], SkNf_fma(g,m[1], m[2])), |
| 796 | G = SkNf_fma(r,m[3], SkNf_fma(g,m[4], m[5])), |
| 797 | Z = SkNf_fma(r,m[6], SkNf_fma(g,m[7], m[8])); |
Mike Klein | c01e7df | 2016-11-17 16:27:10 -0500 | [diff] [blame] | 798 | r = R * Z.invert(); |
| 799 | g = G * Z.invert(); |
| 800 | } |
| 801 | |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 802 | SI SkNf parametric(const SkNf& v, const SkColorSpaceTransferFn& p) { |
| 803 | float result[N]; // Unconstrained powf() doesn't vectorize well... |
| 804 | for (int i = 0; i < N; i++) { |
| 805 | float s = v[i]; |
Matt Sarett | 2410717 | 2016-12-19 14:33:35 -0500 | [diff] [blame] | 806 | result[i] = (s <= p.fD) ? p.fC * s + p.fF |
| 807 | : powf(s * p.fA + p.fB, p.fG) + p.fE; |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 808 | } |
raftias | 2979b1a | 2016-12-05 16:30:41 -0500 | [diff] [blame] | 809 | // Clamp the output to [0, 1]. |
| 810 | // Max(NaN, 0) = 0, but Max(0, NaN) = NaN, so we want this exact order to ensure NaN => 0 |
| 811 | return SkNf::Min(SkNf::Max(SkNf::Load(result), 0.0f), 1.0f); |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 812 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 813 | STAGE_CTX(parametric_r, const SkColorSpaceTransferFn*) { r = parametric(r, *ctx); } |
| 814 | STAGE_CTX(parametric_g, const SkColorSpaceTransferFn*) { g = parametric(g, *ctx); } |
| 815 | STAGE_CTX(parametric_b, const SkColorSpaceTransferFn*) { b = parametric(b, *ctx); } |
| 816 | STAGE_CTX(parametric_a, const SkColorSpaceTransferFn*) { a = parametric(a, *ctx); } |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 817 | |
Matt Sarett | db4d406 | 2016-11-16 16:07:15 -0500 | [diff] [blame] | 818 | SI SkNf table(const SkNf& v, const SkTableTransferFn& table) { |
| 819 | float result[N]; |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 820 | for (int i = 0; i < N; i++) { |
Matt Sarett | db4d406 | 2016-11-16 16:07:15 -0500 | [diff] [blame] | 821 | result[i] = interp_lut(v[i], table.fData, table.fSize); |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 822 | } |
raftias | 2979b1a | 2016-12-05 16:30:41 -0500 | [diff] [blame] | 823 | // no need to clamp - tables are by-design [0,1] -> [0,1] |
Mike Klein | cfcf624 | 2016-11-16 09:01:30 -0500 | [diff] [blame] | 824 | return SkNf::Load(result); |
| 825 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 826 | STAGE_CTX(table_r, const SkTableTransferFn*) { r = table(r, *ctx); } |
| 827 | STAGE_CTX(table_g, const SkTableTransferFn*) { g = table(g, *ctx); } |
| 828 | STAGE_CTX(table_b, const SkTableTransferFn*) { b = table(b, *ctx); } |
| 829 | STAGE_CTX(table_a, const SkTableTransferFn*) { a = table(a, *ctx); } |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 830 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 831 | STAGE_CTX(color_lookup_table, const SkColorLookUpTable*) { |
| 832 | const SkColorLookUpTable* colorLUT = ctx; |
raftias | 5476128 | 2016-12-01 13:44:07 -0500 | [diff] [blame] | 833 | SkASSERT(3 == colorLUT->inputChannels() || 4 == colorLUT->inputChannels()); |
| 834 | SkASSERT(3 == colorLUT->outputChannels()); |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 835 | float result[3][N]; |
| 836 | for (int i = 0; i < N; ++i) { |
raftias | 5476128 | 2016-12-01 13:44:07 -0500 | [diff] [blame] | 837 | const float in[4] = { r[i], g[i], b[i], a[i] }; |
| 838 | float out[3]; |
| 839 | colorLUT->interp(out, in); |
| 840 | for (int j = 0; j < colorLUT->outputChannels(); ++j) { |
| 841 | result[j][i] = out[j]; |
| 842 | } |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 843 | } |
| 844 | r = SkNf::Load(result[0]); |
| 845 | g = SkNf::Load(result[1]); |
| 846 | b = SkNf::Load(result[2]); |
raftias | 5476128 | 2016-12-01 13:44:07 -0500 | [diff] [blame] | 847 | if (4 == colorLUT->inputChannels()) { |
| 848 | // we must set the pixel to opaque, as the alpha channel was used |
| 849 | // as input before this. |
| 850 | a = 1.f; |
| 851 | } |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 852 | } |
| 853 | |
Mike Klein | 2cbc33d | 2016-11-28 16:30:30 -0500 | [diff] [blame] | 854 | STAGE(lab_to_xyz) { |
raftias | 2563601 | 2016-11-11 15:27:39 -0800 | [diff] [blame] | 855 | const auto lab_l = r * 100.0f; |
| 856 | const auto lab_a = g * 255.0f - 128.0f; |
| 857 | const auto lab_b = b * 255.0f - 128.0f; |
| 858 | auto Y = (lab_l + 16.0f) * (1/116.0f); |
| 859 | auto X = lab_a * (1/500.0f) + Y; |
| 860 | auto Z = Y - (lab_b * (1/200.0f)); |
| 861 | |
| 862 | const auto X3 = X*X*X; |
| 863 | X = (X3 > 0.008856f).thenElse(X3, (X - (16/116.0f)) * (1/7.787f)); |
| 864 | const auto Y3 = Y*Y*Y; |
| 865 | Y = (Y3 > 0.008856f).thenElse(Y3, (Y - (16/116.0f)) * (1/7.787f)); |
| 866 | const auto Z3 = Z*Z*Z; |
| 867 | Z = (Z3 > 0.008856f).thenElse(Z3, (Z - (16/116.0f)) * (1/7.787f)); |
| 868 | |
| 869 | // adjust to D50 illuminant |
| 870 | X *= 0.96422f; |
| 871 | Y *= 1.00000f; |
| 872 | Z *= 0.82521f; |
| 873 | |
| 874 | r = X; |
| 875 | g = Y; |
| 876 | b = Z; |
| 877 | } |
| 878 | |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 879 | SI SkNf assert_in_tile(const SkNf& v, float limit) { |
| 880 | for (int i = 0; i < N; i++) { |
| 881 | SkASSERT(0 <= v[i] && v[i] < limit); |
| 882 | } |
| 883 | return v; |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 884 | } |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 885 | |
Florin Malita | 5b2f579 | 2017-01-20 14:53:03 -0500 | [diff] [blame] | 886 | SI SkNf ulp_before(float v) { |
| 887 | SkASSERT(v > 0); |
| 888 | SkNf vs(v); |
| 889 | SkNu uvs = SkNu::Load(&vs) - 1; |
| 890 | return SkNf::Load(&uvs); |
| 891 | } |
| 892 | |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 893 | SI SkNf clamp(const SkNf& v, float limit) { |
Florin Malita | 5b2f579 | 2017-01-20 14:53:03 -0500 | [diff] [blame] | 894 | SkNf result = SkNf::Max(0, SkNf::Min(v, ulp_before(limit))); |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 895 | return assert_in_tile(result, limit); |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 896 | } |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 897 | SI SkNf repeat(const SkNf& v, float limit) { |
| 898 | SkNf result = v - (v/limit).floor()*limit; |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 899 | // For small negative v, (v/limit).floor()*limit can dominate v in the subtraction, |
| 900 | // which leaves result == limit. We want result < limit, so clamp it one ULP. |
Florin Malita | 5b2f579 | 2017-01-20 14:53:03 -0500 | [diff] [blame] | 901 | result = SkNf::Min(result, ulp_before(limit)); |
Mike Klein | b273fc4 | 2016-11-17 15:42:22 -0500 | [diff] [blame] | 902 | return assert_in_tile(result, limit); |
| 903 | } |
Mike Klein | 2e35e8a | 2016-11-18 15:47:22 -0500 | [diff] [blame] | 904 | SI SkNf mirror(const SkNf& v, float l/*imit*/) { |
| 905 | SkNf result = ((v - l) - ((v - l) / (2*l)).floor()*(2*l) - l).abs(); |
| 906 | // Same deal as repeat. |
Florin Malita | 5b2f579 | 2017-01-20 14:53:03 -0500 | [diff] [blame] | 907 | result = SkNf::Min(result, ulp_before(l)); |
Mike Klein | 2e35e8a | 2016-11-18 15:47:22 -0500 | [diff] [blame] | 908 | return assert_in_tile(result, l); |
| 909 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 910 | STAGE_CTX( clamp_x, const float*) { r = clamp (r, *ctx); } |
| 911 | STAGE_CTX(repeat_x, const float*) { r = repeat(r, *ctx); } |
| 912 | STAGE_CTX(mirror_x, const float*) { r = mirror(r, *ctx); } |
| 913 | STAGE_CTX( clamp_y, const float*) { g = clamp (g, *ctx); } |
| 914 | STAGE_CTX(repeat_y, const float*) { g = repeat(g, *ctx); } |
| 915 | STAGE_CTX(mirror_y, const float*) { g = mirror(g, *ctx); } |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 916 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 917 | STAGE_CTX(save_xy, SkImageShaderContext*) { |
| 918 | r.store(ctx->x); |
| 919 | g.store(ctx->y); |
Mike Klein | 46e66a2 | 2016-11-21 16:19:34 -0500 | [diff] [blame] | 920 | |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 921 | // Whether bilinear or bicubic, all sample points have the same fractional offset (fx,fy). |
| 922 | // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid |
| 923 | // surrounding (x,y), all (0.5,0.5) off-center. |
Mike Klein | 886cf53 | 2016-12-06 11:31:25 -0500 | [diff] [blame] | 924 | auto fract = [](const SkNf& v) { return v - v.floor(); }; |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 925 | fract(r + 0.5f).store(ctx->fx); |
| 926 | fract(g + 0.5f).store(ctx->fy); |
Mike Klein | 886cf53 | 2016-12-06 11:31:25 -0500 | [diff] [blame] | 927 | } |
Mike Klein | 46e66a2 | 2016-11-21 16:19:34 -0500 | [diff] [blame] | 928 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 929 | STAGE_CTX(accumulate, const SkImageShaderContext*) { |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 930 | // Bilinear and bicubic filtering are both separable, so we'll end up with independent |
| 931 | // scale contributions in x and y that we multiply together to get each pixel's scale factor. |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 932 | auto scale = SkNf::Load(ctx->scalex) * SkNf::Load(ctx->scaley); |
Mike Klein | 87185f7 | 2016-12-01 18:22:26 -0500 | [diff] [blame] | 933 | dr = SkNf_fma(scale, r, dr); |
| 934 | dg = SkNf_fma(scale, g, dg); |
| 935 | db = SkNf_fma(scale, b, db); |
| 936 | da = SkNf_fma(scale, a, da); |
Mike Klein | b04c352 | 2016-11-28 11:55:58 -0500 | [diff] [blame] | 937 | } |
| 938 | |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 939 | // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center |
| 940 | // are combined in direct proportion to their area overlapping that logical query pixel. |
| 941 | // At positive offsets, the x-axis contribution to that rectangular area is fx; (1-fx) |
| 942 | // at negative x offsets. The y-axis is treated symmetrically. |
| 943 | template <int Scale> |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 944 | SI void bilinear_x(SkImageShaderContext* ctx, SkNf* x) { |
| 945 | *x = SkNf::Load(ctx->x) + Scale*0.5f; |
| 946 | auto fx = SkNf::Load(ctx->fx); |
| 947 | (Scale > 0 ? fx : (1.0f - fx)).store(ctx->scalex); |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 948 | } |
| 949 | template <int Scale> |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 950 | SI void bilinear_y(SkImageShaderContext* ctx, SkNf* y) { |
| 951 | *y = SkNf::Load(ctx->y) + Scale*0.5f; |
| 952 | auto fy = SkNf::Load(ctx->fy); |
| 953 | (Scale > 0 ? fy : (1.0f - fy)).store(ctx->scaley); |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 954 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 955 | STAGE_CTX(bilinear_nx, SkImageShaderContext*) { bilinear_x<-1>(ctx, &r); } |
| 956 | STAGE_CTX(bilinear_px, SkImageShaderContext*) { bilinear_x<+1>(ctx, &r); } |
| 957 | STAGE_CTX(bilinear_ny, SkImageShaderContext*) { bilinear_y<-1>(ctx, &g); } |
| 958 | STAGE_CTX(bilinear_py, SkImageShaderContext*) { bilinear_y<+1>(ctx, &g); } |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 959 | |
| 960 | |
// In bicubic interpolation, the 16 pixels at +/- 0.5 and +/- 1.5 offsets from the sample
| 962 | // pixel center are combined with a non-uniform cubic filter, with high filter values near |
| 963 | // the center and lower values farther away. |
| 964 | // |
| 965 | // We break this filter function into two parts, one for near +/- 0.5 offsets, |
| 966 | // and one for far +/- 1.5 offsets. |
| 967 | // |
| 968 | // See GrBicubicEffect for details about this particular Mitchell-Netravali filter. |
// Bicubic filter weight for the near taps (+/- 0.5), as a function of the fractional offset t.
SI SkNf bicubic_near(const SkNf& t) {
    // 1/18 + 9/18t + 27/18t^2 - 21/18t^3 == t ( t ( -21/18t + 27/18) + 9/18) + 1/18
    return SkNf_fma(t, SkNf_fma(t, SkNf_fma(-21/18.0f, t, 27/18.0f), 9/18.0f), 1/18.0f);
}
// Bicubic filter weight for the far taps (+/- 1.5), as a function of the fractional offset t.
SI SkNf bicubic_far(const SkNf& t) {
    // 0/18 + 0/18*t - 6/18t^2 + 7/18t^3 == t^2 (7/18t - 6/18)
    return (t*t)*SkNf_fma(7/18.0f, t, -6/18.0f);
}
| 977 | |
// Move the sample x to the bicubic tap at Scale/2 pixels (Scale in {-3,-1,+1,+3})
// and record that tap's x-axis filter weight in ctx->scalex.
template <int Scale>
SI void bicubic_x(SkImageShaderContext* ctx, SkNf* x) {
    *x = SkNf::Load(ctx->x) + Scale*0.5f;
    auto fx = SkNf::Load(ctx->fx);
    // Negative taps measure the offset from the other side, hence 1 - fx.
    if (Scale == -3) { return bicubic_far (1.0f - fx).store(ctx->scalex); }
    if (Scale == -1) { return bicubic_near(1.0f - fx).store(ctx->scalex); }
    if (Scale == +1) { return bicubic_near( fx).store(ctx->scalex); }
    if (Scale == +3) { return bicubic_far ( fx).store(ctx->scalex); }
    SkDEBUGFAIL("unreachable");
}
// Move the sample y to the bicubic tap at Scale/2 pixels (Scale in {-3,-1,+1,+3})
// and record that tap's y-axis filter weight in ctx->scaley.
template <int Scale>
SI void bicubic_y(SkImageShaderContext* ctx, SkNf* y) {
    *y = SkNf::Load(ctx->y) + Scale*0.5f;
    auto fy = SkNf::Load(ctx->fy);
    // Negative taps measure the offset from the other side, hence 1 - fy.
    if (Scale == -3) { return bicubic_far (1.0f - fy).store(ctx->scaley); }
    if (Scale == -1) { return bicubic_near(1.0f - fy).store(ctx->scaley); }
    if (Scale == +1) { return bicubic_near( fy).store(ctx->scaley); }
    if (Scale == +3) { return bicubic_far ( fy).store(ctx->scaley); }
    SkDEBUGFAIL("unreachable");
}
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 998 | STAGE_CTX(bicubic_n3x, SkImageShaderContext*) { bicubic_x<-3>(ctx, &r); } |
| 999 | STAGE_CTX(bicubic_n1x, SkImageShaderContext*) { bicubic_x<-1>(ctx, &r); } |
| 1000 | STAGE_CTX(bicubic_p1x, SkImageShaderContext*) { bicubic_x<+1>(ctx, &r); } |
| 1001 | STAGE_CTX(bicubic_p3x, SkImageShaderContext*) { bicubic_x<+3>(ctx, &r); } |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 1002 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1003 | STAGE_CTX(bicubic_n3y, SkImageShaderContext*) { bicubic_y<-3>(ctx, &g); } |
| 1004 | STAGE_CTX(bicubic_n1y, SkImageShaderContext*) { bicubic_y<-1>(ctx, &g); } |
| 1005 | STAGE_CTX(bicubic_p1y, SkImageShaderContext*) { bicubic_y<+1>(ctx, &g); } |
| 1006 | STAGE_CTX(bicubic_p3y, SkImageShaderContext*) { bicubic_y<+3>(ctx, &g); } |
Mike Klein | b0b17d1 | 2016-12-09 16:25:44 -0500 | [diff] [blame] | 1007 | |
| 1008 | |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1009 | template <typename T> |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1010 | SI SkNi offset_and_ptr(T** ptr, const SkImageShaderContext* ctx, const SkNf& x, const SkNf& y) { |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1011 | SkNi ix = SkNx_cast<int>(x), |
| 1012 | iy = SkNx_cast<int>(y); |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1013 | SkNi offset = iy*ctx->stride + ix; |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 1014 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1015 | *ptr = (const T*)ctx->pixels; |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1016 | return offset; |
| 1017 | } |
| 1018 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1019 | STAGE_CTX(gather_a8, const SkImageShaderContext*) { |
Mike Klein | 7a14734d | 2016-11-29 15:33:39 -0500 | [diff] [blame] | 1020 | const uint8_t* p; |
| 1021 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
| 1022 | |
| 1023 | r = g = b = 0.0f; |
Mike Klein | e2e2ae2 | 2016-12-02 15:21:03 -0500 | [diff] [blame] | 1024 | a = SkNf_from_byte(gather(tail, p, offset)); |
Mike Klein | 7a14734d | 2016-11-29 15:33:39 -0500 | [diff] [blame] | 1025 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1026 | STAGE_CTX(gather_i8, const SkImageShaderContext*) { |
Mike Klein | f7657e9 | 2016-11-29 12:57:22 -0500 | [diff] [blame] | 1027 | const uint8_t* p; |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1028 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
Mike Klein | f7657e9 | 2016-11-29 12:57:22 -0500 | [diff] [blame] | 1029 | |
| 1030 | SkNi ix = SkNx_cast<int>(gather(tail, p, offset)); |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1031 | from_8888(gather(tail, ctx->ctable->readColors(), ix), &r, &g, &b, &a); |
Mike Klein | f7657e9 | 2016-11-29 12:57:22 -0500 | [diff] [blame] | 1032 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1033 | STAGE_CTX(gather_g8, const SkImageShaderContext*) { |
Mike Klein | 6b77f1c | 2016-11-22 15:50:12 -0500 | [diff] [blame] | 1034 | const uint8_t* p; |
| 1035 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
| 1036 | |
Mike Klein | e2e2ae2 | 2016-12-02 15:21:03 -0500 | [diff] [blame] | 1037 | r = g = b = SkNf_from_byte(gather(tail, p, offset)); |
Mike Klein | b04c352 | 2016-11-28 11:55:58 -0500 | [diff] [blame] | 1038 | a = 1.0f; |
Mike Klein | 6b77f1c | 2016-11-22 15:50:12 -0500 | [diff] [blame] | 1039 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1040 | STAGE_CTX(gather_565, const SkImageShaderContext*) { |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1041 | const uint16_t* p; |
| 1042 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
| 1043 | |
Mike Klein | 56b5079 | 2016-11-29 08:14:49 -0500 | [diff] [blame] | 1044 | from_565(gather(tail, p, offset), &r, &g, &b); |
Mike Klein | b04c352 | 2016-11-28 11:55:58 -0500 | [diff] [blame] | 1045 | a = 1.0f; |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1046 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1047 | STAGE_CTX(gather_4444, const SkImageShaderContext*) { |
Mike Klein | cb5338c | 2016-11-22 14:58:45 -0500 | [diff] [blame] | 1048 | const uint16_t* p; |
| 1049 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
| 1050 | |
Mike Klein | 56b5079 | 2016-11-29 08:14:49 -0500 | [diff] [blame] | 1051 | from_4444(gather(tail, p, offset), &r, &g, &b, &a); |
Mike Klein | cb5338c | 2016-11-22 14:58:45 -0500 | [diff] [blame] | 1052 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1053 | STAGE_CTX(gather_8888, const SkImageShaderContext*) { |
Mike Klein | cb5338c | 2016-11-22 14:58:45 -0500 | [diff] [blame] | 1054 | const uint32_t* p; |
| 1055 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
| 1056 | |
Mike Klein | 56b5079 | 2016-11-29 08:14:49 -0500 | [diff] [blame] | 1057 | from_8888(gather(tail, p, offset), &r, &g, &b, &a); |
Mike Klein | cb5338c | 2016-11-22 14:58:45 -0500 | [diff] [blame] | 1058 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1059 | STAGE_CTX(gather_f16, const SkImageShaderContext*) { |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1060 | const uint64_t* p; |
| 1061 | SkNi offset = offset_and_ptr(&p, ctx, r, g); |
| 1062 | |
Mike Klein | 4958006 | 2016-12-11 11:42:07 -0500 | [diff] [blame] | 1063 | auto px = gather(tail, p, offset); |
| 1064 | from_f16(&px, &r, &g, &b, &a); |
Mike Klein | cb2c12b | 2016-11-22 13:22:48 -0500 | [diff] [blame] | 1065 | } |
| 1066 | |
Florin Malita | c86e470 | 2017-01-20 08:41:34 -0500 | [diff] [blame] | 1067 | STAGE_CTX(linear_gradient_2stops, const SkPM4f*) { |
| 1068 | auto t = r; |
| 1069 | SkPM4f c0 = ctx[0], |
| 1070 | dc = ctx[1]; |
| 1071 | |
| 1072 | r = SkNf_fma(t, dc.r(), c0.r()); |
| 1073 | g = SkNf_fma(t, dc.g(), c0.g()); |
| 1074 | b = SkNf_fma(t, dc.b(), c0.b()); |
| 1075 | a = SkNf_fma(t, dc.a(), c0.a()); |
| 1076 | } |
Mike Klein | 06a65e2 | 2016-11-17 12:39:09 -0500 | [diff] [blame] | 1077 | |
Mike Klein | a9e8ef0 | 2017-01-23 14:06:55 -0500 | [diff] [blame] | 1078 | STAGE_CTX(byte_tables, const void*) { |
| 1079 | struct Tables { const uint8_t *r, *g, *b, *a; }; |
| 1080 | auto tables = (const Tables*)ctx; |
| 1081 | |
| 1082 | r = SkNf_from_byte(gather(tail, tables->r, SkNf_round(255.0f, r))); |
| 1083 | g = SkNf_from_byte(gather(tail, tables->g, SkNf_round(255.0f, g))); |
| 1084 | b = SkNf_from_byte(gather(tail, tables->b, SkNf_round(255.0f, b))); |
| 1085 | a = SkNf_from_byte(gather(tail, tables->a, SkNf_round(255.0f, a))); |
| 1086 | } |
| 1087 | |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 1088 | SI Fn enum_to_Fn(SkRasterPipeline::StockStage st) { |
| 1089 | switch (st) { |
| 1090 | #define M(stage) case SkRasterPipeline::stage: return stage; |
| 1091 | SK_RASTER_PIPELINE_STAGES(M) |
| 1092 | #undef M |
| 1093 | } |
| 1094 | SkASSERT(false); |
| 1095 | return just_return; |
| 1096 | } |
Mike Klein | 9161ef0 | 2016-10-04 14:03:27 -0400 | [diff] [blame] | 1097 | |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1098 | namespace { |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1099 | |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1100 | static void build_program(void** program, const SkRasterPipeline::Stage* stages, int nstages) { |
| 1101 | for (int i = 0; i < nstages; i++) { |
| 1102 | *program++ = (void*)enum_to_Fn(stages[i].stage); |
| 1103 | if (stages[i].ctx) { |
| 1104 | *program++ = stages[i].ctx; |
| 1105 | } |
| 1106 | } |
| 1107 | *program++ = (void*)just_return; |
| 1108 | } |
| 1109 | |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1110 | static void run_program(void** program, size_t x, size_t n) { |
| 1111 | SkNf u; // fastest to start uninitialized. |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1112 | |
| 1113 | auto start = (Fn)load_and_increment(&program); |
| 1114 | while (n >= N) { |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1115 | start(x*N, program, u,u,u,u, u,u,u,u); |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1116 | x += N; |
| 1117 | n -= N; |
| 1118 | } |
| 1119 | if (n) { |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1120 | start(x*N+n, program, u,u,u,u, u,u,u,u); |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1121 | } |
| 1122 | } |
| 1123 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1124 | // Compiled manages its memory manually because it's not safe to use |
| 1125 | // std::vector, SkTDArray, etc without setting us up for big ODR violations. |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1126 | struct Compiled { |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1127 | Compiled(const SkRasterPipeline::Stage* stages, int nstages) { |
| 1128 | int slots = nstages + 1; // One extra for just_return. |
| 1129 | for (int i = 0; i < nstages; i++) { |
| 1130 | if (stages[i].ctx) { |
| 1131 | slots++; |
| 1132 | } |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1133 | } |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1134 | fProgram = (void**)sk_malloc_throw(slots * sizeof(void*)); |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1135 | build_program(fProgram, stages, nstages); |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1136 | } |
| 1137 | ~Compiled() { sk_free(fProgram); } |
| 1138 | |
| 1139 | Compiled(const Compiled& o) { |
| 1140 | int slots = 0; |
| 1141 | while (o.fProgram[slots++] != (void*)just_return); |
| 1142 | |
| 1143 | fProgram = (void**)sk_malloc_throw(slots * sizeof(void*)); |
| 1144 | memcpy(fProgram, o.fProgram, slots * sizeof(void*)); |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1145 | } |
| 1146 | |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1147 | void operator()(size_t x, size_t n) { |
| 1148 | run_program(fProgram, x, n); |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1149 | } |
| 1150 | |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1151 | void** fProgram; |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1152 | }; |
| 1153 | } |
| 1154 | |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 1155 | namespace SK_OPTS_NS { |
| 1156 | |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1157 | SI std::function<void(size_t, size_t)> |
Mike Klein | af49b19 | 2016-11-15 08:52:04 -0500 | [diff] [blame] | 1158 | compile_pipeline(const SkRasterPipeline::Stage* stages, int nstages) { |
Mike Klein | 0c32496 | 2016-12-01 14:05:38 -0500 | [diff] [blame] | 1159 | return Compiled{stages,nstages}; |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 1160 | } |
| 1161 | |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1162 | SI void run_pipeline(size_t x, size_t n, |
Mike Klein | c789b61 | 2016-11-30 13:45:06 -0500 | [diff] [blame] | 1163 | const SkRasterPipeline::Stage* stages, int nstages) { |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1164 | static const int kStackMax = 256; |
| 1165 | // Worst case is nstages stages with nstages context pointers, and just_return. |
| 1166 | if (2*nstages+1 <= kStackMax) { |
| 1167 | void* program[kStackMax]; |
| 1168 | build_program(program, stages, nstages); |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1169 | run_program(program, x,n); |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1170 | } else { |
Mike Klein | 319ba3d | 2017-01-20 15:11:54 -0500 | [diff] [blame] | 1171 | Compiled{stages,nstages}(x,n); |
Mike Klein | a2d25ec | 2017-01-05 15:03:53 -0500 | [diff] [blame] | 1172 | } |
Mike Klein | c789b61 | 2016-11-30 13:45:06 -0500 | [diff] [blame] | 1173 | } |
| 1174 | |
Mike Klein | aebfb45 | 2016-10-25 10:27:33 -0400 | [diff] [blame] | 1175 | } // namespace SK_OPTS_NS |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 1176 | |
Mike Klein | 04adfda | 2016-10-12 09:52:55 -0400 | [diff] [blame] | 1177 | #undef SI |
| 1178 | #undef STAGE |
Mike Klein | 464e6a1 | 2017-01-04 11:04:01 -0500 | [diff] [blame] | 1179 | #undef STAGE_CTX |
Mike Klein | 04adfda | 2016-10-12 09:52:55 -0400 | [diff] [blame] | 1180 | #undef RGBA_XFERMODE |
| 1181 | #undef RGB_XFERMODE |
Mike Klein | 9161ef0 | 2016-10-04 14:03:27 -0400 | [diff] [blame] | 1182 | |
Mike Klein | baaf8ad | 2016-09-29 09:04:15 -0400 | [diff] [blame] | 1183 | #endif//SkRasterPipeline_opts_DEFINED |