/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include "SkCpu.h"

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
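//
// For example (a hypothetical sketch only; the names below aren't part of this file):
//
//     static inline __m128 floor_ps(__m128 v) {
//     #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
//         return _mm_floor_ps(v);
//     #else
//         /* ...SSE2 fallback... */
//     #endif
//     }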

#define SKNX_IS_FAST

template <>
class SkNx<2, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

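    // _mm_movemask_epi8() extracts one bit per byte of the register; masking with 0xff
    // keeps just the low 8 bytes, i.e. the two floats this Sk2f actually holds.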
    bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }

    SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

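    // -0.0f is exactly the sign bit (0x80000000), so andnot clears each lane's sign bit.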
    SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    SkNx floor() const {
        if (SkCpu::Supports(SkCpu::SSE41)) {
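            // GCC and Clang only allow _mm_floor_ps() when the whole file is compiled
            // with SSE4.1 enabled, so we issue roundps ourselves there ($0x1 rounds
            // toward -inf); MSVC exposes the intrinsic regardless of /arch.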
            __m128 r;
        #if defined(__GNUC__) || defined(__clang__)
            asm("roundps $0x1, %[fVec], %[r]" : [r]"=x"(r) : [fVec]"x"(fVec));
        #else
            r = _mm_floor_ps(fVec);
        #endif
            return r;
        }
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
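        // E.g. fVec = -1.5f: truncation roundtrips to -1.0f, and -1.0f > -1.5f, so we
        // subtract 1 to get -2.0f, which is floor(-1.5f). For +1.5f the roundtrip gives
        // +1.0f and nothing is subtracted.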
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
    }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
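        // Bit-select: lanes where the fVec mask is all 1s take t, all-0 lanes take e.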
        return _mm_or_ps(_mm_and_ps (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
    }

    __m128 fVec;
};

template <>
class SkNx<4, int> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(int val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(int a, int b, int c, int d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const {
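        // SSE2 has no 32-bit low-half multiply (_mm_mullo_epi32 is SSE4.1), so multiply
        // the even and odd lanes separately with _mm_mul_epu32, then keep each 64-bit
        // product's low 32 bits and interleave them back into order.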
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    int operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
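        // E.g. a = 0xFFFF, b = 0x0001: biased they become 0x7FFF (32767) and 0x8001
        // (-32767), the signed min picks -32767, and adding the bias back gives 0x0001.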
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi16(top_8x, _mm_min_epi16(_mm_sub_epi16(a.fVec, top_8x),
                                                   _mm_sub_epi16(b.fVec, top_8x)));
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx() {}
    SkNx(const __m128i& vec) : fVec(vec) {}
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}

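    // _mm_cvtsi32_si128() and _mm_cvtsi128_si32() move exactly 32 bits, so Load and
    // store touch only the 4 bytes this Sk4b represents.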
    static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
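        // E.g. 0xFF vs 0x01: flipped they're 0x7F (127) and 0x81 (-127), and 127 < -127
        // is false, matching the unsigned answer that 255 < 1 is false.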
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}

template <> /*static*/ inline Sk4i SkNx_cast<int, float>(const Sk4f& src) {
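    // _mm_cvttps_epi32() truncates toward zero, matching a C cast from float to int.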
    return _mm_cvttps_epi32(src.fVec);
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
    // Ideally we'd use _mm_packus_epi32 here. But that's SSE4.1+.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
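    // (_mm_shuffle_epi8() writes a zero byte wherever the shuffle index has its high
    // bit set, so ~0 doubles as a "don't care, make it zero" index.)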
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32:
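    // (_mm_packs_epi32 saturates to int16, so bias into signed range first. E.g. 65535
    // biases to 32767, packs exactly, and adding 0x8000 back wraps to 0xFFFF.)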
    _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000));
    return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
#endif
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return _mm_packus_epi16(_16, _16);
#endif
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
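    // Split the 16 floats into four Sk4f quarters, truncate each to 32-bit ints, then
    // pack down to bytes in two saturating steps; values already in [0,255] come
    // through exactly.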
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

#endif//SkNx_sse_DEFINED