/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_neon_DEFINED
#define SkNx_neon_DEFINED

#include <arm_neon.h>

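// Note: this header provides the NEON specializations of the SkNx<N,T> template.
// AI is assumed to be Skia's always-inline function attribute macro, defined by
// the including header rather than here.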
namespace {

// ARMv8 has vrndmq_f32 to floor 4 floats.  Here we emulate it:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.  Seems plenty big.
AI static float32x4_t armv7_vrndmq_f32(float32x4_t v) {
    auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
    auto too_big = vcgtq_f32(roundtrip, v);
    return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}
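// A quick worked example of the emulation above (values are illustrative, not from
// the original source): for v = -1.5f, truncation gives roundtrip = -1.0f; since
// -1.0f > -1.5f, the too_big mask is all 1s, so we subtract 1.0f and return -2.0f,
// which is floor(-1.5f).  For non-negative inputs truncation already equals floor,
// so the mask is all 0s and nothing is subtracted.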

template <>
class SkNx<2, float> {
public:
    AI SkNx(float32x2_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
    AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

    AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

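    // vrecpe_f32 returns a rough (~8 bit) reciprocal estimate, and vrecps_f32(x, e)
    // returns the Newton-Raphson correction factor (2 - x*e); multiplying them gives
    // one refinement step toward 1/x, so invert() is fast but approximate.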
    AI SkNx invert() const {
        float32x2_t est0 = vrecpe_f32(fVec),
                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
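    // ARMv7 NEON has no float division instruction, so the #else branch multiplies by
    // a reciprocal estimate refined with two Newton-Raphson steps; each step roughly
    // doubles the estimate's bits of precision.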
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdiv_f32(fVec, o.fVec);
    #else
        float32x2_t est0 = vrecpe_f32(o.fVec),
                    est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
                    est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
    AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
    AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
    AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
    AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

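    // vrsqrte_f32/vrsqrts_f32 are the reciprocal-square-root analogues of the
    // vrecpe/vrecps pair: vrsqrts_f32(x, e*e) returns (3 - x*e*e)/2, one
    // Newton-Raphson step for 1/sqrt(x).  sqrt() below refines twice on ARMv7 and
    // multiplies by x at the end, since x * 1/sqrt(x) == sqrt(x).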
    AI SkNx rsqrt() const {
        float32x2_t est0 = vrsqrte_f32(fVec);
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrt_f32(fVec);
    #else
        float32x2_t est0 = vrsqrte_f32(fVec),
                    est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
                    est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { float32x2_t v; float fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
    }
    AI bool anyTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
    }

    float32x2_t fVec;
};

template <>
class SkNx<4, float> {
public:
    AI SkNx(float32x4_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
    AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

    AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }

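    // vld4q_f32/vst4q_f32 de-interleave as they go: sixteen packed floats
    // r0,g0,b0,a0,r1,g1,b1,a1,... load into four planar vectors r,g,b,a,
    // and Store4 interleaves them back.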
    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        float32x4x4_t rgba = vld4q_f32((const float*) ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        float32x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4q_f32((float*) dst, rgba);
    }

    AI SkNx invert() const {
        float32x4_t est0 = vrecpeq_f32(fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdivq_f32(fVec, o.fVec);
    #else
        float32x4_t est0 = vrecpeq_f32(o.fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
                    est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
    AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
    AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
    AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
    AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabsq_f32(fVec); }
    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndmq_f32(fVec);
    #else
        return armv7_vrndmq_f32(fVec);
    #endif
    }

    AI SkNx rsqrt() const {
        float32x4_t est0 = vrsqrteq_f32(fVec);
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrtq_f32(fVec);
    #else
        float32x4_t est0 = vrsqrteq_f32(fVec),
                    est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
                    est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { float32x4_t v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    AI bool allTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
    }
    AI bool anyTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
    }

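    // vbslq_f32 is a bitwise select: each result bit comes from t where the mask bit
    // is 1 and from e where it is 0.  That's why the comparison operators above
    // return all-1s/all-0s lane masks bit-cast back to float.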
    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x4_t fVec;
};

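// vfmaq_f32 computes a + f*m fused, with a single rounding, and is only guaranteed
// available on ARMv8; on ARMv7 a generic, separately rounded f*m + a fallback is
// presumably provided by the including header instead.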
#if defined(SK_CPU_ARM64)
AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) {
    return vfmaq_f32(a.fVec, f.fVec, m.fVec);
}
#endif

// It's possible that for our current use cases, representing this as
// half a uint16x8_t might be better than representing it as a uint16x4_t.
// It'd make conversion to Sk4b one step simpler.
template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const uint16x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
        fVec = (uint16x4_t) { a,b,c,d };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
    AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        uint16x4x3_t rgb = vld3_u16((const uint16_t*)ptr);
        *r = rgb.val[0];
        *g = rgb.val[1];
        *b = rgb.val[2];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        uint16x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4_u16((uint16_t*) dst, rgba);
    }

    AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }

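    // Note: these shifts (and the identical ones in the classes below) rely on the
    // <</>> operators GCC and Clang define for NEON vector types -- a compiler
    // extension -- rather than the vshl/vshr intrinsics.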
    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_u16(fVec, t.fVec, e.fVec);
    }

    uint16x4_t fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const uint16x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
    AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }

    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
    }

    AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u16(fVec, t.fVec, e.fVec);
    }

    uint16x8_t fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
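    // The aligned(1) typedef below tells the compiler this uint32_t may live at any
    // address, so the 4-byte load/store in Load()/store() are legal on unaligned
    // pointers.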
    typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
        fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
    }
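    // Only 4 of the 8 lanes are meaningful, so Load()/store() move exactly 4 bytes:
    // vld1_dup_u32 broadcasts one 32-bit load into both halves of the register, and
    // vst1_lane_u32 writes just lane 0 back out.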
    AI static SkNx Load(const void* ptr) {
        return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
    }
    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    uint8x8_t fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const uint8x16_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
    }

    AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u8(fVec, t.fVec, e.fVec);
    }

    uint8x16_t fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const int32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t v) {
        fVec = vdupq_n_s32(v);
    }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
        fVec = (int32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_s32((const int32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_s32((int32_t*)ptr, fVec);
    }
    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { int32x4_t v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const {
        return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    }
    AI SkNx operator < (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    }
    AI SkNx operator > (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
    AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); }
    // TODO as needed

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
    }

    AI SkNx abs() const { return vabsq_s32(fVec); }

    int32x4_t fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const uint32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t v) {
        fVec = vdupq_n_u32(v);
    }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
        fVec = (uint32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_u32((const uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_u32((uint32_t*)ptr, fVec);
    }
    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
    AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
    // TODO as needed

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u32(fVec, t.fVec, e.fVec);
    }

    uint32x4_t fVec;
};

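// Casts between the lane types.  Broadly: vcvtq_* convert between float and integer
// lanes of the same width, vmovl_*/vget_low_* widen, and the vqmov{n,un}_* family
// narrows with saturation (clamping rather than wrapping on overflow).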
template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return vcvtq_s32_f32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return vcvtq_f32_s32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    return vcvtq_f32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
    uint16x4_t _16 = vqmovn_u32(_32);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
    uint16x8_t _16 = vmovl_u8(src.fVec);
    return vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(_16)));
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
}

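// The 16-lane cast below avoids eight narrowing instructions by reinterpreting each
// converted uint32x4_t as bytes and unzipping twice: each vuzpq_u8(...).val[0] keeps
// the even-indexed bytes, so two rounds keep every fourth byte, i.e. the low byte of
// each 32-bit lane.  Unlike the saturating casts above, this assumes the floats are
// already in [0,255].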
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);
    return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
                             (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
                    vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
                             (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return vget_low_u16(vmovl_u8(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    uint16x4_t _16 = vqmovun_s32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
    return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return vreinterpretq_s32_u32(src.fVec);
}

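// Rounds by adding 0.5 and truncating toward zero.  Note this matches
// round-half-away-from-zero only for non-negative x (e.g. -0.7f + 0.5f = -0.2f
// truncates to 0, not -1), so callers are presumably expected to pass
// non-negative values.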
AI static Sk4i Sk4f_round(const Sk4f& x) {
    return vcvtq_s32_f32((x + 0.5f).fVec);
}

}  // namespace

#endif//SkNx_neon_DEFINED