/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_neon_DEFINED
#define SkNx_neon_DEFINED

#include <arm_neon.h>

namespace { // NOLINT(google-build-namespaces)

// ARMv8 has vrndm(q)_f32 to floor floats. Here we emulate it:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
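// Worked example: for v = -1.5f the truncated roundtrip is -1.0f, which is
// greater than v, so we subtract 1 to get floor(-1.5f) = -2.0f.  For v = 2.7f
// the roundtrip is 2.0f, which is not too big and is already the floor.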
AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {
    auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
    auto too_big = vcgtq_f32(roundtrip, v);
    return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}
AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {
    auto roundtrip = vcvt_f32_s32(vcvt_s32_f32(v));
    auto too_big = vcgt_f32(roundtrip, v);
    return vsub_f32(roundtrip, (float32x2_t)vand_u32(too_big, (uint32x2_t)vdup_n_f32(1)));
}

template <>
class SkNx<2, float> {
public:
    AI SkNx(float32x2_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
    AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

    AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

    AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
        float32x2x2_t xy = vld2_f32((const float*) ptr);
        *x = xy.val[0];
        *y = xy.val[1];
    }

    AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {
        float32x2x2_t ab = {{
            a.fVec,
            b.fVec,
        }};
        vst2_f32((float*) dst, ab);
    }

    AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {
        float32x2x3_t abc = {{
            a.fVec,
            b.fVec,
            c.fVec,
        }};
        vst3_f32((float*) dst, abc);
    }

    AI static void Store4(void* dst, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
        float32x2x4_t abcd = {{
            a.fVec,
            b.fVec,
            c.fVec,
            d.fVec,
        }};
        vst4_f32((float*) dst, abcd);
    }

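    // vrecpe_f32 gives a rough 1/x estimate, and vrecps_f32(a,b) computes (2 - a*b),
    // so est1 = est0 * (2 - fVec*est0) is one Newton-Raphson refinement of the estimate.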
    AI SkNx invert() const {
        float32x2_t est0 = vrecpe_f32(fVec),
                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vneg_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdiv_f32(fVec, o.fVec);
    #else
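        // ARMv7 NEON has no float divide, so approximate with two
        // Newton-Raphson refinements of the reciprocal of o, then multiply.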
        float32x2_t est0 = vrecpe_f32(o.fVec),
                    est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
                    est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
    AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
    AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
    AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
    AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabs_f32(fVec); }
    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndm_f32(fVec);
    #else
        return emulate_vrndm_f32(fVec);
    #endif
    }

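    // vrsqrte_f32 is a rough 1/sqrt(x) estimate; vrsqrts_f32(x, e*e) computes
    // (3 - x*e*e)/2, the Newton-Raphson correction factor for that estimate.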
    AI SkNx rsqrt() const {
        float32x2_t est0 = vrsqrte_f32(fVec);
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrt_f32(fVec);
    #else
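        // No hardware sqrt on ARMv7: refine 1/sqrt(x) twice, then multiply
        // by x, since x * 1/sqrt(x) = sqrt(x).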
        float32x2_t est0 = vrsqrte_f32(fVec),
                    est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
                    est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { float32x2_t v; float fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }

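    // Comparison results are all-1s or all-0s lanes.  On ARM64 the horizontal
    // vminv/vmaxv reductions test them all at once: min != 0 means every lane
    // is true, max != 0 means at least one lane is true.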
    AI bool allTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vminv_u32(vreinterpret_u32_f32(fVec));
    #else
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
    #endif
    }
    AI bool anyTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vmaxv_u32(vreinterpret_u32_f32(fVec));
    #else
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
    #endif
    }

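    // vbsl (bitwise select) takes bits from t where the mask bit is 1 and from
    // e where it is 0; with all-1s/all-0s comparison masks this is a lane-wise
    // ternary: mask ? t : e.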
    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x2_t fVec;
};

template <>
class SkNx<4, float> {
public:
    AI SkNx(float32x4_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
    AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

    AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }

    AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {
        float32x4x2_t xy = vld2q_f32((const float*) ptr);
        *x = xy.val[0];
        *y = xy.val[1];
    }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        float32x4x4_t rgba = vld4q_f32((const float*) ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        float32x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4q_f32((float*) dst, rgba);
    }

    AI SkNx invert() const {
        float32x4_t est0 = vrecpeq_f32(fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vnegq_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdivq_f32(fVec, o.fVec);
    #else
        float32x4_t est0 = vrecpeq_f32(o.fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
                    est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
    AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
    AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
    AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
    AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabsq_f32(fVec); }
    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndmq_f32(fVec);
    #else
        return emulate_vrndmq_f32(fVec);
    #endif
    }

    AI SkNx rsqrt() const {
        float32x4_t est0 = vrsqrteq_f32(fVec);
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrtq_f32(fVec);
    #else
        float32x4_t est0 = vrsqrteq_f32(fVec),
                    est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
                    est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { float32x4_t v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

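    // Without ARM64's horizontal vminvq/vmaxvq, reduce in two steps:
    // vrev64q_f32 swaps lanes within each 64-bit half, so after Min/Max each
    // half holds its pairwise result; combining lanes 0 and 2 finishes the job.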
    AI float min() const {
    #if defined(SK_CPU_ARM64)
        return vminvq_f32(fVec);
    #else
        SkNx min = Min(*this, vrev64q_f32(fVec));
        return SkTMin(min[0], min[2]);
    #endif
    }

    AI float max() const {
    #if defined(SK_CPU_ARM64)
        return vmaxvq_f32(fVec);
    #else
        SkNx max = Max(*this, vrev64q_f32(fVec));
        return SkTMax(max[0], max[2]);
    #endif
    }

    AI bool allTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vminvq_u32(vreinterpretq_u32_f32(fVec));
    #else
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
    #endif
    }
    AI bool anyTrue() const {
    #if defined(SK_CPU_ARM64)
        return 0 != vmaxvq_u32(vreinterpretq_u32_f32(fVec));
    #else
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
    #endif
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x4_t fVec;
};

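// vfmaq_f32 computes f*m + a as a single fused instruction with one rounding step.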
#if defined(SK_CPU_ARM64)
    AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) {
        return vfmaq_f32(a.fVec, f.fVec, m.fVec);
    }
#endif

// It's possible that for our current use cases, representing this as
// half a uint16x8_t might be better than representing it as a uint16x4_t.
// It'd make conversion to Sk4b one step simpler.
template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const uint16x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
        fVec = (uint16x4_t) { a,b,c,d };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
    AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        uint16x4x3_t rgb = vld3_u16((const uint16_t*)ptr);
        *r = rgb.val[0];
        *g = rgb.val[1];
        *b = rgb.val[2];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        uint16x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4_u16((uint16_t*) dst, rgba);
    }

    AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_u16(fVec, t.fVec, e.fVec);
    }

    uint16x4_t fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const uint16x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
    AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }

    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
    }

    AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

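    // vmull_u16 widens each 16x16 multiply to a full 32-bit product; shifting
    // the products right by 16 and narrowing keeps just the high halves.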
    AI SkNx mulHi(const SkNx& m) const {
        uint32x4_t hi = vmull_u16(vget_high_u16(fVec), vget_high_u16(m.fVec));
        uint32x4_t lo = vmull_u16( vget_low_u16(fVec),  vget_low_u16(m.fVec));

        return { vcombine_u16(vshrn_n_u32(lo,16), vshrn_n_u32(hi,16)) };
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u16(fVec, t.fVec, e.fVec);
    }

    uint16x8_t fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
        fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
    }
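    // All four bytes travel as one (possibly unaligned) 32-bit access:
    // vld1_dup_u32 broadcasts them into the vector, vst1_lane_u32 writes lane 0.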
    AI static SkNx Load(const void* ptr) {
        return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
    }
    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    uint8x8_t fVec;
};

template <>
class SkNx<8, uint8_t> {
public:
    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h) {
        fVec = (uint8x8_t) { a,b,c,d, e,f,g,h };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    uint8x8_t fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const uint8x16_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
    }

    AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u8(fVec, t.fVec, e.fVec);
    }

    uint8x16_t fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const int32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t v) {
        fVec = vdupq_n_s32(v);
    }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
        fVec = (int32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_s32((const int32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_s32((int32_t*)ptr, fVec);
    }
    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { int32x4_t v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const {
        return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    }
    AI SkNx operator < (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    }
    AI SkNx operator > (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
    AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); }
    // TODO as needed

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
    }

    AI SkNx abs() const { return vabsq_s32(fVec); }

    int32x4_t fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const uint32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t v) {
        fVec = vdupq_n_u32(v);
    }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
        fVec = (uint32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_u32((const uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_u32((uint32_t*)ptr, fVec);
    }
    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
    AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
    // TODO as needed

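    // Same trick as the uint16_t mulHi: vmull_u32 widens to 64-bit products,
    // then shift-narrow by 32 to keep the high words.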
    AI SkNx mulHi(const SkNx& m) const {
        uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec));
        uint64x2_t lo = vmull_u32( vget_low_u32(fVec),  vget_low_u32(m.fVec));

        return { vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)) };
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u32(fVec, t.fVec, e.fVec);
    }

    uint32x4_t fVec;
};

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return vcvtq_s32_f32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return vcvtq_f32_s32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    return vcvtq_f32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
    uint16x4_t _16 = vqmovn_u32(_32);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
    uint16x8_t _16 = vmovl_u8(src.fVec);
    return vmovl_u16(vget_low_u16(_16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
    return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
}

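// vuzpq_u8(a,b).val[0] gathers the even-indexed bytes of a then b.  After the
// float->u32 conversions, the byte of interest sits in the low (even) byte of
// each 32-bit lane, so two rounds of unzipping pack 16 floats into 16 bytes.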
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);
    return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
                             (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
                    vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
                             (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
    Sk4i a, b;
    SkNx_split(src, &a, &b);
    uint16x4_t a16 = vqmovun_s32(a.fVec);
    uint16x4_t b16 = vqmovun_s32(b.fVec);

    return vqmovn_u16(vcombine_u16(a16, b16));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return vget_low_u16(vmovl_u8(src.fVec));
}

template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
    return vmovl_u8(src.fVec);
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}

template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
    return vqmovn_u16(src.fVec);
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    uint16x4_t _16 = vqmovun_s32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
    uint16x4_t _16 = vqmovn_u32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
    return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return vreinterpretq_s32_u32(src.fVec);
}

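// vcvtq_s32_f32 truncates toward zero, so adding 0.5 first rounds to nearest
// for non-negative inputs (e.g. x = 2.6f -> 3.1f -> 3).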
AI static Sk4i Sk4f_round(const Sk4f& x) {
    return vcvtq_s32_f32((x + 0.5f).fVec);
}

} // namespace

#endif//SkNx_neon_DEFINED