/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_neon_DEFINED
#define SkNx_neon_DEFINED

#include <arm_neon.h>

#define SKNX_IS_FAST

// ARMv8 has vrndmq_f32 to floor 4 floats. Here we emulate it:
//   - round by adding (1<<23) with our sign, then subtracting it;
//   - if that rounded value is bigger than our input, subtract 1.
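// E.g. floor(1.7): adding 1<<23 gives 8388609.7, which rounds to 8388610.0f
// (floats that large are spaced 1.0 apart); subtracting the bias leaves 2.0,
// and since 2.0 > 1.7, we subtract 1 to get 1.0.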
static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
    auto sign    = vandq_u32((uint32x4_t)v, vdupq_n_u32(1u<<31));
    auto bias    = (float32x4_t)(vorrq_u32((uint32x4_t)vdupq_n_f32(1<<23), sign));
    auto rounded = vsubq_f32(vaddq_f32(v, bias), bias);
    auto too_big = vcgtq_f32(rounded, v);
    return vsubq_f32(rounded, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}

// Well, this is absurd. The shifts require compile-time constant arguments.
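// Each SHIFTn macro switches on the runtime shift count so that every op()
// call gets the compile-time constant it requires; a count of 0 (or out of
// range) falls through and returns the vector unchanged.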
#define SHIFT8(op, v, bits) switch(bits) { \
    case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \
    case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \
    case 7: return op(v, 7); \
    } return fVec

#define SHIFT16(op, v, bits) if (bits < 8) { SHIFT8(op, v, bits); } switch(bits) { \
    case  8: return op(v,  8); case  9: return op(v,  9); \
    case 10: return op(v, 10); case 11: return op(v, 11); case 12: return op(v, 12); \
    case 13: return op(v, 13); case 14: return op(v, 14); case 15: return op(v, 15); \
    } return fVec

#define SHIFT32(op, v, bits) if (bits < 16) { SHIFT16(op, v, bits); } switch(bits) { \
    case 16: return op(v, 16); case 17: return op(v, 17); case 18: return op(v, 18); \
    case 19: return op(v, 19); case 20: return op(v, 20); case 21: return op(v, 21); \
    case 22: return op(v, 22); case 23: return op(v, 23); case 24: return op(v, 24); \
    case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \
    case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \
    case 31: return op(v, 31); } return fVec

template <>
class SkNx<2, float> {
public:
    SkNx(float32x2_t vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(vdup_n_f32(val)) {}
    static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
    SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

    void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

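    // vrecpe_f32 gives a rough reciprocal estimate, and each vrecps_f32 step
    // performs one Newton-Raphson refinement (vrecps(a,b) computes 2 - a*b),
    // roughly doubling the bits of precision. ARMv7 NEON has no float divide,
    // so operator/ there multiplies by this refined reciprocal instead.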
    SkNx approxInvert() const {
        float32x2_t est0 = vrecpe_f32(fVec),
                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
        return est1;
    }
    SkNx invert() const {
        float32x2_t est1 = this->approxInvert().fVec,
                    est2 = vmul_f32(vrecps_f32(est1, fVec), est1);
        return est2;
    }

    SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdiv_f32(fVec, o.fVec);
    #else
        return vmul_f32(fVec, o.invert().fVec);
    #endif
    }

    SkNx operator == (const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
    SkNx operator <  (const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
    SkNx operator >  (const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
    SkNx operator <= (const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
    SkNx operator >= (const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
    SkNx operator != (const SkNx& o) const {
        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
    }

    static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

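    // Reciprocal square roots refine the same way: vrsqrte_f32 estimates, and
    // each vrsqrts_f32 step (computing (3 - a*b)/2) sharpens it. On ARMv7,
    // sqrt() is then x * rsqrt(x).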
    SkNx rsqrt0() const { return vrsqrte_f32(fVec); }
    SkNx rsqrt1() const {
        float32x2_t est0 = this->rsqrt0().fVec;
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    }
    SkNx rsqrt2() const {
        float32x2_t est1 = this->rsqrt1().fVec;
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
    }

    SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrt_f32(fVec);
    #else
        return *this * this->rsqrt2();
    #endif
    }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { float32x2_t v; float fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }
    template <int k> float kth() const { return (*this)[k]; }

    bool allTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
    }
    bool anyTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
    }

    float32x2_t fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(float32x4_t vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(vdupq_n_f32(val)) {}
    static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
    SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

    void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }

    SkNx approxInvert() const {
        float32x4_t est0 = vrecpeq_f32(fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
        return est1;
    }
    SkNx invert() const {
        float32x4_t est1 = this->approxInvert().fVec,
                    est2 = vmulq_f32(vrecpsq_f32(est1, fVec), est1);
        return est2;
    }

    SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdivq_f32(fVec, o.fVec);
    #else
        return vmulq_f32(fVec, o.invert().fVec);
    #endif
    }

    SkNx operator==(const SkNx& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); }
    SkNx operator <(const SkNx& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); }
    SkNx operator >(const SkNx& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); }
    SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); }
    SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); }
    SkNx operator!=(const SkNx& o) const {
        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
    }

    static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

    SkNx abs() const { return vabsq_f32(fVec); }
    SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndmq_f32(fVec);
    #else
        return armv7_vrndmq_f32(fVec);
    #endif
    }

    SkNx rsqrt0() const { return vrsqrteq_f32(fVec); }
    SkNx rsqrt1() const {
        float32x4_t est0 = this->rsqrt0().fVec;
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    }
    SkNx rsqrt2() const {
        float32x4_t est1 = this->rsqrt1().fVec;
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
    }

    SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrtq_f32(fVec);
    #else
        return *this * this->rsqrt2();
    #endif
    }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { float32x4_t v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }
    template <int k> float kth() const { return (*this)[k]; }

    bool allTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
    }
    bool anyTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
    }

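    // Bit-select: vbslq takes each bit from t where the (reinterpreted)
    // comparison mask bit is set, and from e where it is clear.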
    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x4_t fVec;
};
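
// A quick usage sketch (Sk4f is the SkNx<4, float> alias from SkNx.h; src and
// dst here are hypothetical float pointers):
//     Sk4f v = Sk4f::Load(src);
//     (v * 255.0f + 0.5f).floor().store(dst);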

// It's possible that for our current use cases, representing this as
// half a uint16x8_t might be better than representing it as a uint16x4_t.
// It'd make conversion to Sk4b one step simpler.
template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const uint16x4_t& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
    static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }

    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
        fVec = (uint16x4_t) { a,b,c,d };
    }

    void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }

    SkNx operator << (int bits) const { SHIFT16(vshl_n_u16, fVec, bits); }
    SkNx operator >> (int bits) const { SHIFT16(vshr_n_u16, fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }
    template <int k> uint16_t kth() const { return (*this)[k]; }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_u16(fVec, t.fVec, e.fVec);
    }

    uint16x4_t fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const uint16x8_t& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
    static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }

    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
    }

    void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }

    SkNx operator << (int bits) const { SHIFT16(vshlq_n_u16, fVec, bits); }
    SkNx operator >> (int bits) const { SHIFT16(vshrq_n_u16, fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }
    template <int k> uint16_t kth() const { return (*this)[k]; }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u16(fVec, t.fVec, e.fVec);
    }

    uint16x8_t fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx(const uint8x8_t& vec) : fVec(vec) {}

    SkNx() {}
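    // Four bytes travel as a single 32-bit lane: Load dups the u32 into both
    // lanes of the uint8x8_t, and store writes just lane 0 back out.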
    static SkNx Load(const void* ptr) {
        return (uint8x8_t)vld1_dup_u32((const uint32_t*)ptr);
    }
    void store(void* ptr) const {
        return vst1_lane_u32((uint32_t*)ptr, (uint32x2_t)fVec, 0);
    }

    // TODO as needed

    uint8x8_t fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const uint8x16_t& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
    static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }

    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
    }

    void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

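    // vqaddq_u8 saturates: per-byte sums clamp at 255 instead of wrapping.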
    SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }
    template <int k> uint8_t kth() const { return (*this)[k]; }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u8(fVec, t.fVec, e.fVec);
    }

    uint8x16_t fVec;
};

#undef SHIFT32
#undef SHIFT16
#undef SHIFT8

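// Float -> byte: convert to u32, then saturating-narrow twice; vqmovn clamps
// out-of-range values instead of truncating them.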
template<> inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
    uint16x4_t _16 = vqmovn_u32(_32);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    uint16x8_t _16 = vmovl_u8(src.fVec);
    uint32x4_t _32 = vmovl_u16(vget_low_u16(_16));
    return vcvtq_f32_u32(_32);
}

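// Two rounds of vuzpq_u8 keep every 4th byte, i.e. the low byte of each u32
// lane, packing all 16 low bytes into one vector. This truncates; inputs are
// assumed to already be in [0,255].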
static inline void Sk4f_ToBytes(uint8_t bytes[16],
                                const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
    vst1q_u8(bytes, vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
                                      (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
                             vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
                                      (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
}

template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return vget_low_u16(vmovl_u8(src.fVec));
}

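// Unlike the saturating vqmovn used above, vmovn_u16 simply keeps the low byte
// of each lane.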
template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}

#endif//SkNx_neon_DEFINED