/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_neon_DEFINED
#define SkNx_neon_DEFINED

#include <arm_neon.h>

namespace {

// ARMv8 has vrndmq_f32 to floor 4 floats.  Here we emulate it:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.  Seems plenty big.
AI static float32x4_t armv7_vrndmq_f32(float32x4_t v) {
    auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
    auto too_big   = vcgtq_f32(roundtrip, v);
    return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}
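
// A worked example of the emulation above (added for illustration): for v = -1.5f,
// the roundtrip through int32 truncates to -1.0f, which is greater than v, so the
// mask is all 1s and we subtract 1.0f, returning -2.0f -- a proper floor().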

template <>
class SkNx<2, float> {
public:
    AI SkNx(float32x2_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
    AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

    AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

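    // One Newton-Raphson refinement of vrecpe_f32's rough reciprocal estimate.
    // (Ballpark: the estimate starts with ~8 bits of precision, and each
    // vrecps_f32 step roughly doubles it.)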
    AI SkNx invert() const {
        float32x2_t est0 = vrecpe_f32(fVec),
                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vneg_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdiv_f32(fVec, o.fVec);
    #else
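        // No hardware divide on ARMv7: refine a reciprocal estimate twice, then multiply.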
        float32x2_t est0 = vrecpe_f32(o.fVec),
                    est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
                    est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
    AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
    AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
    AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
    AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabs_f32(fVec); }

    AI SkNx rsqrt() const {
        float32x2_t est0 = vrsqrte_f32(fVec);
        return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrt_f32(fVec);
    #else
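        // Emulate sqrt(x) as x * (1/sqrt(x)), refining the vrsqrte_f32 estimate twice.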
        float32x2_t est0 = vrsqrte_f32(fVec),
                    est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
                    est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
        return vmul_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { float32x2_t v; float fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
    }
    AI bool anyTrue() const {
        auto v = vreinterpret_u32_f32(fVec);
        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_f32(vreinterpret_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x2_t fVec;
};

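// A quick usage sketch (hypothetical buffer, not from the original file):
//     float pts[2] = {1.5f, -2.0f};
//     Sk2f v = Sk2f::Load(pts);
//     (v * v).store(pts);   // pts is now {2.25f, 4.0f}
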
template <>
class SkNx<4, float> {
public:
    AI SkNx(float32x4_t vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
    AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

    AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
    AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }

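    // Load4/Store4 (de)interleave four planes (e.g. RGBA) with a single vld4q/vst4q.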
    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        float32x4x4_t rgba = vld4q_f32((const float*) ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        float32x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4q_f32((float*) dst, rgba);
    }

    AI SkNx invert() const {
        float32x4_t est0 = vrecpeq_f32(fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
        return est1;
    }

    AI SkNx operator - () const { return vnegq_f32(fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const {
    #if defined(SK_CPU_ARM64)
        return vdivq_f32(fVec, o.fVec);
    #else
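        // Same reciprocal-refinement trick as the two-lane operator / above.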
        float32x4_t est0 = vrecpeq_f32(o.fVec),
                    est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
                    est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
    AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
    AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
    AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
    AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
    AI SkNx operator!=(const SkNx& o) const {
        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
    }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

    AI SkNx abs() const { return vabsq_f32(fVec); }
    AI SkNx floor() const {
    #if defined(SK_CPU_ARM64)
        return vrndmq_f32(fVec);
    #else
        return armv7_vrndmq_f32(fVec);
    #endif
    }

    AI SkNx rsqrt() const {
        float32x4_t est0 = vrsqrteq_f32(fVec);
        return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    }

    AI SkNx sqrt() const {
    #if defined(SK_CPU_ARM64)
        return vsqrtq_f32(fVec);
    #else
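        // As in the two-lane sqrt(): x * (1/sqrt(x)) with two refinement steps.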
        float32x4_t est0 = vrsqrteq_f32(fVec),
                    est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
                    est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
        return vmulq_f32(fVec, est2);
    #endif
    }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { float32x4_t v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

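    // The comparison operators above fill each lane with all 1s (true) or all 0s
    // (false), so any single lane can serve as a boolean in these reductions.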
    AI bool allTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
    }
    AI bool anyTrue() const {
        auto v = vreinterpretq_u32_f32(fVec);
        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
    }

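    // vbslq_f32 takes bits from t where the mask bit is 1 and from e where it is 0.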
    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
    }

    float32x4_t fVec;
};

#if defined(SK_CPU_ARM64)
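    // Fused multiply-add, f*m + a, in a single rounding step (used on AArch64 only here).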
    AI static Sk4f SkNx_fma(const Sk4f& f, const Sk4f& m, const Sk4f& a) {
        return vfmaq_f32(a.fVec, f.fVec, m.fVec);
    }
#endif

// It's possible that for our current use cases, representing this as
// half a uint16x8_t might be better than representing it as a uint16x4_t.
// It'd make conversion to Sk4b one step simpler.
template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const uint16x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
        fVec = (uint16x4_t) { a,b,c,d };
    }

    AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
    AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
        *a = rgba.val[3];
    }
    AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
        uint16x4x3_t rgba = vld3_u16((const uint16_t*)ptr);
        *r = rgba.val[0];
        *g = rgba.val[1];
        *b = rgba.val[2];
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        uint16x4x4_t rgba = {{
            r.fVec,
            g.fVec,
            b.fVec,
            a.fVec,
        }};
        vst4_u16((uint16_t*) dst, rgba);
    }

    AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vand_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorr_u16(fVec, o.fVec); }

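    // Shifts lean on the compiler's native vector operators; splatting the scalar
    // count gives every lane the same shift.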
    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbsl_u16(fVec, t.fVec, e.fVec);
    }

    uint16x4_t fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const uint16x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
    AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }

    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
    }

    AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
    AI SkNx operator & (const SkNx& o) const { return vandq_u16(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u16(fVec, t.fVec, e.fVec);
    }

    uint16x8_t fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

    AI SkNx(const uint8x8_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
        fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
    }
    AI static SkNx Load(const void* ptr) {
        return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
    }
    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    uint8x8_t fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const uint8x16_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
    }

    AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
    AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u8(fVec, t.fVec, e.fVec);
    }

    uint8x16_t fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const int32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t v) {
        fVec = vdupq_n_s32(v);
    }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
        fVec = (int32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_s32((const int32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_s32((int32_t*)ptr, fVec);
    }
    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { int32x4_t v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const {
        return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
    }
    AI SkNx operator < (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
    }
    AI SkNx operator > (const SkNx& o) const {
        return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
    }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
    AI static SkNx Max(const SkNx& a, const SkNx& b) { return vmaxq_s32(a.fVec, b.fVec); }
    // TODO as needed

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
    }

    AI SkNx abs() const { return vabsq_s32(fVec); }

    int32x4_t fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const uint32x4_t& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t v) {
        fVec = vdupq_n_u32(v);
    }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
        fVec = (uint32x4_t){a,b,c,d};
    }
    AI static SkNx Load(const void* ptr) {
        return vld1q_u32((const uint32_t*)ptr);
    }
    AI void store(void* ptr) const {
        return vst1q_u32((uint32_t*)ptr, fVec);
    }
    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }

    AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
    AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }

    AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
    AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
    AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
    // TODO as needed

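    // High 32 bits of each 64-bit product: widen with vmull_u32, then shift-narrow
    // back down with vshrn_n_u64.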
    AI SkNx mulHi(const SkNx& m) const {
        uint64x2_t hi = vmull_u32(vget_high_u32(fVec), vget_high_u32(m.fVec));
        uint64x2_t lo = vmull_u32( vget_low_u32(fVec),  vget_low_u32(m.fVec));

        return { vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)) };
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return vbslq_u32(fVec, t.fVec, e.fVec);
    }

    uint32x4_t fVec;
};

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return vcvtq_s32_f32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return vcvtq_f32_s32(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    return vcvtq_f32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
    uint16x4_t _16 = vqmovn_u32(_32);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4u SkNx_cast<uint32_t, uint8_t>(const Sk4b& src) {
    uint16x8_t _16 = vmovl_u8(src.fVec);
    return vmovl_u16(vget_low_u16(_16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
    return vreinterpretq_s32_u32(SkNx_cast<uint32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return vcvtq_f32_s32(SkNx_cast<int32_t>(src).fVec);
}

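// 16 floats -> 16 bytes: convert each 4-lane quarter to u32, then two rounds of
// vuzpq_u8 gather the low byte of every 32-bit lane.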
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);
    return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
                             (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
                    vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
                             (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return vget_low_u16(vmovl_u8(src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
    uint16x4_t _16 = vqmovun_s32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint32_t>(const Sk4u& src) {
    uint16x4_t _16 = vqmovn_u32(src.fVec);
    return vqmovn_u16(vcombine_u16(_16, _16));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
    return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return vreinterpretq_s32_u32(src.fVec);
}

AI static Sk4i Sk4f_round(const Sk4f& x) {
    return vcvtq_s32_f32((x + 0.5f).fVec);
}
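// Note: vcvtq_s32_f32 truncates toward zero, so adding 0.5f matches
// round-to-nearest only for non-negative x (presumably what callers pass).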

}  // namespace

#endif//SkNx_neon_DEFINED