/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include <immintrin.h>

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
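// (floor() and thenElse() below follow this pattern: the SK_CPU_SSE_LEVEL check sits inside
// an inline function body, with an SSE2 fallback under #else.)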

#define SKNX_IS_FAST
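
// A minimal usage sketch (illustrative only, not part of this header): Sk4f is the usual
// alias for SkNx<4, float>, so a load/compute/store round trip looks like
//     Sk4f x = Sk4f::Load(src);
//     (x * Sk4f(0.5f) + Sk4f(1.0f)).store(dst);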

template <>
class SkNx<2, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx   sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

    bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }

    SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    SkNx floor() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
#else
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
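        // For example, -1.5 truncates to -1.0, which is greater than -1.5, so we subtract 1
        // to land on -2.0; a non-negative value like 1.5 truncates to 1.0 and is left alone.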
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
#endif
    }

    SkNx   sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
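        // SSE2 fallback: bit-select with the comparison mask, (mask & t) | (~mask & e).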
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
#endif
    }

    __m128 fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const {
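        // SSE2 has no 32-bit lane multiply (_mm_mullo_epi32 is SSE4.1), so multiply the even
        // and odd lanes separately with _mm_mul_epu32 and re-interleave the low 32-bit halves.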
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }

    int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
#endif
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    // Not quite sure how to best do operator * in SSE2.  We probably don't use it.

    SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    // operator < and > take a little extra fiddling to make work for unsigned ints.
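    // (One way, as a sketch: flip the sign bit of both operands so the unsigned ordering
    //  matches the signed one, then compare with _mm_cmplt_epi32 / _mm_cmpgt_epi32, much like
    //  the uint8_t operator < below does with _mm_cmplt_epi8.)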

    uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
#endif
    }

    __m128i fVec;
};


template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
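        // e.g. min(0xFFFF, 0x0001): biased they become 0x7FFF and 0x8001 (-32767 signed),
        // _mm_min_epi16 picks 0x8001, and adding the bias back gives 0x0001, the unsigned min.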
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx() {}
    SkNx(const __m128i& vec) : fVec(vec) {}
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}


    static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

template<> /*static*/ inline Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}
template<> /*static*/ inline Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template <> /*static*/ inline Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    // TODO: This seems to be causing code generation problems.   Investigate?
    return _mm_packus_epi32(src.fVec, src.fVec);
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
    __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
    return _mm_packs_epi32(x,x);
#endif
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return _mm_packus_epi16(_16, _16);
#endif
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
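    // Narrow each 32-bit lane to a byte via two rounds of _mm_packus_epi16 (unsigned saturation).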
    return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}

template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return src.fVec;
}

static inline Sk4i Sk4f_round(const Sk4f& x) {
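    // _mm_cvtps_epi32 rounds with the current MXCSR rounding mode, round-to-nearest-even by default.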
    return _mm_cvtps_epi32(x.fVec);
}

static inline void Sk4h_load4(const void* ptr, Sk4h* r, Sk4h* g, Sk4h* b, Sk4h* a) {
    __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
            hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
    __m128i even = _mm_unpacklo_epi16(lo, hi),   // r0 r2 g0 g2 b0 b2 a0 a2
             odd = _mm_unpackhi_epi16(lo, hi);   // r1 r3 ...
    __m128i rg = _mm_unpacklo_epi16(even, odd),  // r0 r1 r2 r3 g0 g1 g2 g3
            ba = _mm_unpackhi_epi16(even, odd);  // b0 b1 ...  a0 a1 ...
    *r = rg;
    *g = _mm_srli_si128(rg, 8);
    *b = ba;
    *a = _mm_srli_si128(ba, 8);
}

static inline void Sk4h_store4(void* dst, const Sk4h& r, const Sk4h& g, const Sk4h& b,
                               const Sk4h& a) {
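    // Inverse of Sk4h_load4: interleave the four planes,
    //     rg = r0 g0 r1 g1 r2 g2 r3 g3,  ba = b0 a0 b1 a1 b2 a2 b3 a3,
    // then interleave 32-bit pairs so each store writes two r,g,b,a quads.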
    __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
    __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
    __m128i lo = _mm_unpacklo_epi32(rg, ba);
    __m128i hi = _mm_unpackhi_epi32(rg, ba);
    _mm_storeu_si128(((__m128i*) dst) + 0, lo);
    _mm_storeu_si128(((__m128i*) dst) + 1, hi);
}

#endif//SkNx_sse_DEFINED