/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_neon_DEFINED
#define SkNx_neon_DEFINED

#include <arm_neon.h>

namespace {

// ARMv8 has vrndmq_f32 to floor 4 floats.  Here we emulate it:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.  Seems plenty big.
AI static float32x4_t armv7_vrndmq_f32(float32x4_t v) {
    auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
    auto too_big   = vcgtq_f32(roundtrip, v);
    return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}
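// For example:
//   v = -1.25: trunc(-1.25) = -1.0; -1.0 > -1.25, so subtract 1 -> -2.0 == floor(-1.25).
//   v =  1.75: trunc( 1.75) =  1.0;  1.0 >  1.75 is false       ->  1.0 == floor( 1.75).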

template <>
class SkNx<2, float> {
public:
    AI SkNx(float32x2_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
    AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

    AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

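    // vrecpe_f32 gives a rough reciprocal estimate; vrecps_f32(e, x) computes 2 - e*x,
    // so e' = e * (2 - e*x) below is one Newton-Raphson refinement step toward 1/x.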
    AI SkNx invert() const {
        float32x2_t est0 = vrecpe_f32(fVec),
                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vneg_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdiv_f32(fVec, o.fVec);
    #else
        float32x2_t est0 = vrecpe_f32(o.fVec),
                    est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
                    est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
    AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
    AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
    AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
    AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabs_f32(fVec); }

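    // vrsqrte_f32 gives a rough 1/sqrt(x) estimate; vrsqrts_f32(x, e*e) computes
    // (3 - x*e*e)/2, so e' = e * (3 - x*e*e)/2 is one Newton-Raphson step toward 1/sqrt(x).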
    AI SkNx rsqrt() const {
        float32x2_t est0 = vrsqrte_f32(fVec);
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrt_f32(fVec);
    #else
        float32x2_t est0 = vrsqrte_f32(fVec),
                    est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
                    est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { float32x2_t v; float fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
    }
    AI bool anyTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
    }

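    // vbsl: for each bit, select from t where the (comparison) mask bit is set, else from e.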
    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x2_t fVec;
};

template <>
class SkNx<4, float> {
public:
    AI SkNx(float32x4_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
    AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

    AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        float32x4x4_t rgba = vld4q_f32((const float*) ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        float32x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4q_f32((float*) dst, rgba);
    }

    AI SkNx invert() const {
        float32x4_t est0 = vrecpeq_f32(fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vnegq_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdivq_f32(fVec, o.fVec);
    #else
        float32x4_t est0 = vrecpeq_f32(o.fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
                    est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
    AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
    AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
    AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
    AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabsq_f32(fVec); }
    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndmq_f32(fVec);
    #else
        return armv7_vrndmq_f32(fVec);
    #endif
    }

    AI SkNx rsqrt() const {
        float32x4_t est0 = vrsqrteq_f32(fVec);
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrtq_f32(fVec);
    #else
        float32x4_t est0 = vrsqrteq_f32(fVec),
                    est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
                    est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { float32x4_t v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    AI bool allTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
    }
    AI bool anyTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x4_t fVec;
};

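// ARMv8 has a true fused multiply-add: vfmaq_f32(a, f, m) computes f*m + a with one rounding.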
#if defined(SK_CPU_ARM64)
    AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) {
        return vfmaq_f32(a.fVec, f.fVec, m.fVec);
    }
#endif

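// A minimal usage sketch, assuming the generic front end in SkNx.h where Sk4f is an
// alias for SkNx<4, float> (illustrative values only, not part of this header):
//
//   Sk4f x(1,2,3,4), y(4,3,2,1);
//   Sk4f lo = Sk4f::Min(x, y);         // {1,2,2,1}
//   Sk4f m  = (x < y).thenElse(x, y);  // per-lane select: also {1,2,2,1}
//   float out[4];
//   m.store(out);
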
// It's possible that for our current use cases, representing this as
// half a uint16x8_t might be better than representing it as a uint16x4_t.
// It'd make conversion to Sk4b one step simpler.
template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const uint16x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
        fVec = (uint16x4_t) { a,b,c,d };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
    AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        uint16x4x3_t rgb = vld3_u16((const uint16_t*)ptr);
        *r = rgb.val[0];
        *g = rgb.val[1];
        *b = rgb.val[2];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        uint16x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4_u16((uint16_t*) dst, rgba);
    }

    AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_u16(fVec, t.fVec, e.fVec);
    }

    uint16x4_t fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const uint16x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
    AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }

    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
    }

    AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u16(fVec, t.fVec, e.fVec);
    }

    uint16x8_t fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
        fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
    }
    AI static SkNx Load(const void* ptr) {
        return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
    }
    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    uint8x8_t fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const uint8x16_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
    }

    AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u8(fVec, t.fVec, e.fVec);
    }

    uint8x16_t fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const int32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t v) {
        fVec = vdupq_n_s32(v);
    }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
        fVec = (int32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_s32((const int32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_s32((int32_t*)ptr, fVec);
    }
    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { int32x4_t v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const {
        return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    }
    AI SkNx operator < (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    }
    AI SkNx operator > (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
    AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); }
    // TODO as needed

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
    }

    AI SkNx abs() const { return vabsq_s32(fVec); }

    int32x4_t fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const uint32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t v) {
        fVec = vdupq_n_u32(v);
    }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
        fVec = (uint32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_u32((const uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_u32((uint32_t*)ptr, fVec);
    }
    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
    AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
    // TODO as needed

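    // Per-lane high 32 bits of the 64-bit product, i.e. (fVec * m) >> 32.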
    AI SkNx mulHi(const SkNx& m) const {
        uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec));
        uint64x2_t lo = vmull_u32( vget_low_u32(fVec),  vget_low_u32(m.fVec));

        return { vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)) };
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u32(fVec, t.fVec, e.fVec);
    }

    uint32x4_t fVec;
};

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return vcvtq_s32_f32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return vcvtq_f32_s32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    return vcvtq_f32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
    uint16x4_t _16 = vqmovn_u32(_32);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
    uint16x8_t _16 = vmovl_u8(src.fVec);
    return vmovl_u16(vget_low_u16(_16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
    return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
}

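// Two rounds of vuzpq_u8 keep byte 0 (the little-endian low byte) of each 32-bit lane,
// packing the 16 converted values into one uint8x16_t; values are assumed to fit in [0,255].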
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);
    return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
                             (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
                    vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
                             (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return vget_low_u16(vmovl_u8(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    uint16x4_t _16 = vqmovun_s32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
    uint16x4_t _16 = vqmovn_u32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
    return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return vreinterpretq_s32_u32(src.fVec);
}

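// Rounds by adding 0.5 then truncating toward zero, so this is round-to-nearest
// only for non-negative inputs.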
AI static Sk4i Sk4f_round(const Sk4f& x) {
    return vcvtq_s32_f32((x + 0.5f).fVec);
}

}  // namespace

#endif//SkNx_neon_DEFINED