/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __XMMINTRIN_H
#define __XMMINTRIN_H

#ifndef __SSE__
#error "SSE instruction set not enabled"
#else

#include <mmintrin.h>

typedef float __v4sf __attribute__((__vector_size__(16)));
typedef float __m128 __attribute__((__vector_size__(16)));

#include <mm_malloc.h>
#include <emmintrin.h>

static inline __m128 __attribute__((__always_inline__)) _mm_add_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_addss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_add_ps(__m128 a, __m128 b)
{
  return a + b;
}

static inline __m128 __attribute__((__always_inline__)) _mm_sub_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_subss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_sub_ps(__m128 a, __m128 b)
{
  return a - b;
}

static inline __m128 __attribute__((__always_inline__)) _mm_mul_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_mulss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_mul_ps(__m128 a, __m128 b)
{
  return a * b;
}

static inline __m128 __attribute__((__always_inline__)) _mm_div_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_divss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_div_ps(__m128 a, __m128 b)
{
  return a / b;
}

static inline __m128 __attribute__((__always_inline__)) _mm_sqrt_ss(__m128 a)
{
  return __builtin_ia32_sqrtss(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_sqrt_ps(__m128 a)
{
  return __builtin_ia32_sqrtps(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_rcp_ss(__m128 a)
{
  return __builtin_ia32_rcpss(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_rcp_ps(__m128 a)
{
  return __builtin_ia32_rcpps(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_rsqrt_ss(__m128 a)
{
  return __builtin_ia32_rsqrtss(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_rsqrt_ps(__m128 a)
{
  return __builtin_ia32_rsqrtps(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_min_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_minss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_min_ps(__m128 a, __m128 b)
{
  return __builtin_ia32_minps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_max_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_maxss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_max_ps(__m128 a, __m128 b)
{
  return __builtin_ia32_maxps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_and_ps(__m128 a, __m128 b)
{
  return __builtin_ia32_andps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_andnot_ps(__m128 a, __m128 b)
{
  return __builtin_ia32_andnps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_or_ps(__m128 a, __m128 b)
{
  return __builtin_ia32_orps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_xor_ps(__m128 a, __m128 b)
{
  return __builtin_ia32_xorps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpeq_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpeqss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpeq_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpeqps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmplt_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpltss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmplt_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpltps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmple_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpless(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmple_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpleps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpgt_ss(__m128 a, __m128 b)
{
  /* Compare the low elements with the operands swapped (a > b == b < a), then
     keep the upper three elements of a, not b, in the result. */
  return __builtin_shufflevector(a, (__m128)__builtin_ia32_cmpltss(b, a), 4, 1, 2, 3);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpgt_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpltps(b, a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpge_ss(__m128 a, __m128 b)
{
  /* Compare the low elements with the operands swapped (a >= b == b <= a), then
     keep the upper three elements of a, not b, in the result. */
  return __builtin_shufflevector(a, (__m128)__builtin_ia32_cmpless(b, a), 4, 1, 2, 3);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpge_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpleps(b, a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpneq_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpneqss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpneq_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpneqps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpnlt_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpnltss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpnlt_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpnltps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpnle_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpnless(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpnle_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpnleps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpngt_ss(__m128 a, __m128 b)
{
  /* Compare the low elements with the operands swapped (not(a > b) == not(b < a)),
     then keep the upper three elements of a, not b, in the result. */
  return __builtin_shufflevector(a, (__m128)__builtin_ia32_cmpnltss(b, a), 4, 1, 2, 3);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpngt_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpnltps(b, a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpnge_ss(__m128 a, __m128 b)
{
  /* Compare the low elements with the operands swapped (not(a >= b) == not(b <= a)),
     then keep the upper three elements of a, not b, in the result. */
  return __builtin_shufflevector(a, (__m128)__builtin_ia32_cmpnless(b, a), 4, 1, 2, 3);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpnge_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpnleps(b, a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpord_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpordss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpord_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpordps(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpunord_ss(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpunordss(a, b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cmpunord_ps(__m128 a, __m128 b)
{
  return (__m128)__builtin_ia32_cmpunordps(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_comieq_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_comieq(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_comilt_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_comilt(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_comile_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_comile(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_comigt_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_comigt(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_comige_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_comige(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_comineq_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_comineq(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_ucomieq_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_ucomieq(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_ucomilt_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_ucomilt(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_ucomile_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_ucomile(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_ucomigt_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_ucomigt(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_ucomige_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_ucomige(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_ucomineq_ss(__m128 a, __m128 b)
{
  return __builtin_ia32_ucomineq(a, b);
}

static inline int __attribute__((__always_inline__)) _mm_cvtss_si32(__m128 a)
{
  return __builtin_ia32_cvtss2si(a);
}

static inline long long __attribute__((__always_inline__)) _mm_cvtss_si64(__m128 a)
{
  return __builtin_ia32_cvtss2si64(a);
}

static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi32(__m128 a)
{
  return (__m64)__builtin_ia32_cvtps2pi(a);
}

static inline int __attribute__((__always_inline__)) _mm_cvttss_si32(__m128 a)
{
  return __builtin_ia32_cvttss2si(a);
}

static inline long long __attribute__((__always_inline__)) _mm_cvttss_si64(__m128 a)
{
  return __builtin_ia32_cvttss2si64(a);
}

static inline __m64 __attribute__((__always_inline__)) _mm_cvttps_pi32(__m128 a)
{
  return (__m64)__builtin_ia32_cvttps2pi(a);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cvtsi32_ss(__m128 a, int b)
{
  return __builtin_ia32_cvtsi2ss(a, b);
}

#ifdef __x86_64__

static inline __m128 __attribute__((__always_inline__)) _mm_cvtsi64_ss(__m128 a, long long b)
{
  return __builtin_ia32_cvtsi642ss(a, b);
}

#endif

static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi32_ps(__m128 a, __m64 b)
{
  return __builtin_ia32_cvtpi2ps(a, (__v2si)b);
}

static inline float __attribute__((__always_inline__)) _mm_cvtss_f32(__m128 a)
{
  return a[0];
}

static inline __m128 __attribute__((__always_inline__)) _mm_loadh_pi(__m128 a, __m64 const *p)
{
  return __builtin_ia32_loadhps(a, (__v2si *)p);
}

static inline __m128 __attribute__((__always_inline__)) _mm_loadl_pi(__m128 a, __m64 const *p)
{
  return __builtin_ia32_loadlps(a, (__v2si *)p);
}

static inline __m128 __attribute__((__always_inline__)) _mm_load_ss(float *p)
{
  return (__m128){ *p, 0, 0, 0 };
}

static inline __m128 __attribute__((__always_inline__)) _mm_load1_ps(float *p)
{
  return (__m128){ *p, *p, *p, *p };
}

static inline __m128 __attribute__((__always_inline__)) _mm_load_ps(float *p)
{
  return *(__m128*)p;
}

static inline __m128 __attribute__((__always_inline__)) _mm_loadu_ps(float *p)
{
  return __builtin_ia32_loadups(p);
}

static inline __m128 __attribute__((__always_inline__)) _mm_loadr_ps(float *p)
{
  __m128 a = _mm_load_ps(p);
  return __builtin_shufflevector(a, a, 3, 2, 1, 0);
}

static inline __m128 __attribute__((__always_inline__)) _mm_set_ss(float w)
{
  return (__m128){ w, 0, 0, 0 };
}

static inline __m128 __attribute__((__always_inline__)) _mm_set1_ps(float w)
{
  return (__m128){ w, w, w, w };
}

static inline __m128 __attribute__((__always_inline__)) _mm_set_ps(float z, float y, float x, float w)
{
  return (__m128){ w, x, y, z };
}

static inline __m128 __attribute__((__always_inline__)) _mm_setr_ps(float z, float y, float x, float w)
{
  return (__m128){ z, y, x, w };
}

static inline __m128 __attribute__((__always_inline__)) _mm_setzero_ps(void)
{
  return (__m128){ 0, 0, 0, 0 };
}

static inline void __attribute__((__always_inline__)) _mm_storeh_pi(__m64 *p, __m128 a)
{
  __builtin_ia32_storehps((__v2si *)p, a);
}

static inline void __attribute__((__always_inline__)) _mm_storel_pi(__m64 *p, __m128 a)
{
  __builtin_ia32_storelps((__v2si *)p, a);
}

static inline void __attribute__((__always_inline__)) _mm_store_ss(float *p, __m128 a)
{
  *p = a[0];
}

static inline void __attribute__((__always_inline__)) _mm_storeu_ps(float *p, __m128 a)
{
  __builtin_ia32_storeups(p, a);
}

static inline void __attribute__((__always_inline__)) _mm_store1_ps(float *p, __m128 a)
{
  a = __builtin_shufflevector(a, a, 0, 0, 0, 0);
  _mm_storeu_ps(p, a);
}

static inline void __attribute__((__always_inline__)) _mm_store_ps(float *p, __m128 a)
{
  *(__m128 *)p = a;
}

static inline void __attribute__((__always_inline__)) _mm_storer_ps(float *p, __m128 a)
{
  a = __builtin_shufflevector(a, a, 3, 2, 1, 0);
  _mm_store_ps(p, a);
}

#define _MM_HINT_T0 1
#define _MM_HINT_T1 2
#define _MM_HINT_T2 3
#define _MM_HINT_NTA 0

/* FIXME: We have to #define this because "sel" must be a constant integer, and
   Sema doesn't do any form of constant propagation yet. */

#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
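/* Illustrative usage sketch (not part of the original header): because the
   macro expands directly into __builtin_prefetch, "sel" must be one of the
   _MM_HINT_* constants written literally at the call site, e.g.:

     float buf[1024];
     _mm_prefetch((char *)&buf[64], _MM_HINT_T0);   // prefetch into all cache levels
     _mm_prefetch((char *)&buf[128], _MM_HINT_NTA); // non-temporal prefetch
*/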

static inline void __attribute__((__always_inline__)) _mm_stream_pi(__m64 *p, __m64 a)
{
  __builtin_ia32_movntq(p, a);
}

static inline void __attribute__((__always_inline__)) _mm_stream_ps(float *p, __m128 a)
{
  __builtin_ia32_movntps(p, a);
}

static inline void __attribute__((__always_inline__)) _mm_sfence(void)
{
  __builtin_ia32_sfence();
}

static inline int __attribute__((__always_inline__)) _mm_extract_pi16(__m64 a, int n)
{
  /* FIXME:
   * This should force n to be an immediate.
   * This does not use the PEXTRW instruction. From looking at the LLVM source,
   *   the instruction doesn't seem to be hooked up.
   * The code could probably be made better :)
   */
  __v4hi b = (__v4hi)a;
  return b[(n == 0) ? 0 : (n == 1 ? 1 : (n == 2 ? 2 : 3))];
}

/* FIXME: Implement this. We could add a __builtin_insertelement function that's
   similar to the already existing __builtin_shufflevector.
*/
/*
static inline __m64 __attribute__((__always_inline__)) _mm_insert_pi16(__m64 a, int d, int n)
{
  return (__m64){ 0LL };
}
*/

static inline __m64 __attribute__((__always_inline__)) _mm_max_pi16(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pmaxsw((__v4hi)a, (__v4hi)b);
}

static inline __m64 __attribute__((__always_inline__)) _mm_max_pu8(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pmaxub((__v8qi)a, (__v8qi)b);
}

static inline __m64 __attribute__((__always_inline__)) _mm_min_pi16(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pminsw((__v4hi)a, (__v4hi)b);
}

static inline __m64 __attribute__((__always_inline__)) _mm_min_pu8(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pminub((__v8qi)a, (__v8qi)b);
}

static inline int __attribute__((__always_inline__)) _mm_movemask_pi8(__m64 a)
{
  return __builtin_ia32_pmovmskb((__v8qi)a);
}

static inline __m64 __attribute__((__always_inline__)) _mm_mulhi_pu16(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
}

#define _mm_shuffle_pi16(a, n) ((__m64)__builtin_ia32_pshufw((__v4hi)(a), (n)))

static inline void __attribute__((__always_inline__)) _mm_maskmove_si64(__m64 d, __m64 n, char *p)
{
  __builtin_ia32_maskmovq((__v8qi)d, (__v8qi)n, p);
}

static inline __m64 __attribute__((__always_inline__)) _mm_avg_pu8(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pavgb((__v8qi)a, (__v8qi)b);
}

static inline __m64 __attribute__((__always_inline__)) _mm_avg_pu16(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_pavgw((__v4hi)a, (__v4hi)b);
}

static inline __m64 __attribute__((__always_inline__)) _mm_sad_pu8(__m64 a, __m64 b)
{
  return (__m64)__builtin_ia32_psadbw((__v8qi)a, (__v8qi)b);
}

static inline unsigned int __attribute__((__always_inline__)) _mm_getcsr(void)
{
  return __builtin_ia32_stmxcsr();
}

static inline void __attribute__((__always_inline__)) _mm_setcsr(unsigned int i)
{
  __builtin_ia32_ldmxcsr(i);
}

#define _mm_shuffle_ps(a, b, mask) (__builtin_ia32_shufps((a), (b), (mask)))

static inline __m128 __attribute__((__always_inline__)) _mm_unpackhi_ps(__m128 a, __m128 b)
{
  return __builtin_shufflevector(a, b, 2, 6, 3, 7);
}

static inline __m128 __attribute__((__always_inline__)) _mm_unpacklo_ps(__m128 a, __m128 b)
{
  return __builtin_shufflevector(a, b, 0, 4, 1, 5);
}

static inline __m128 __attribute__((__always_inline__)) _mm_move_ss(__m128 a, __m128 b)
{
  return __builtin_shufflevector(a, b, 4, 1, 2, 3);
}

static inline __m128 __attribute__((__always_inline__)) _mm_movehl_ps(__m128 a, __m128 b)
{
  return __builtin_shufflevector(a, b, 6, 7, 2, 3);
}

static inline __m128 __attribute__((__always_inline__)) _mm_movelh_ps(__m128 a, __m128 b)
{
  return __builtin_shufflevector(a, b, 0, 1, 4, 5);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi16_ps(__m64 a)
{
  __m64 b, c;
  __m128 r;

  b = _mm_setzero_si64();
  b = _mm_cmpgt_pi16(b, a);
  c = _mm_unpackhi_pi16(a, b);
  r = _mm_setzero_ps();
  r = _mm_cvtpi32_ps(r, c);
  r = _mm_movelh_ps(r, r);
  c = _mm_unpacklo_pi16(a, b);
  r = _mm_cvtpi32_ps(r, c);

  return r;
}

static inline __m128 __attribute__((__always_inline__)) _mm_cvtpu16_ps(__m64 a)
{
  __m64 b, c;
  __m128 r;

  b = _mm_setzero_si64();
  c = _mm_unpackhi_pi16(a, b);
  r = _mm_setzero_ps();
  r = _mm_cvtpi32_ps(r, c);
  r = _mm_movelh_ps(r, r);
  c = _mm_unpacklo_pi16(a, b);
  r = _mm_cvtpi32_ps(r, c);

  return r;
}

static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi8_ps(__m64 a)
{
  __m64 b;

  b = _mm_setzero_si64();
  b = _mm_cmpgt_pi8(b, a);
  b = _mm_unpacklo_pi8(a, b);

  return _mm_cvtpi16_ps(b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cvtpu8_ps(__m64 a)
{
  __m64 b;

  b = _mm_setzero_si64();
  b = _mm_unpacklo_pi8(a, b);

  return _mm_cvtpi16_ps(b);
}

static inline __m128 __attribute__((__always_inline__)) _mm_cvtpi32x2_ps(__m64 a, __m64 b)
{
  __m128 c;

  c = _mm_setzero_ps();
  c = _mm_cvtpi32_ps(c, b);
  c = _mm_movelh_ps(c, c);

  return _mm_cvtpi32_ps(c, a);
}

static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi16(__m128 a)
{
  __m64 b, c;

  b = _mm_cvtps_pi32(a);
  a = _mm_movehl_ps(a, a);
  c = _mm_cvtps_pi32(a);

  /* b and c hold 32-bit integers; pack them down to 16 bits with signed saturation. */
  return _mm_packs_pi32(b, c);
}

static inline __m64 __attribute__((__always_inline__)) _mm_cvtps_pi8(__m128 a)
{
  __m64 b, c;

  b = _mm_cvtps_pi16(a);
  c = _mm_setzero_si64();

  return _mm_packs_pi16(b, c);
}

static inline int __attribute__((__always_inline__)) _mm_movemask_ps(__m128 a)
{
  return __builtin_ia32_movmskps(a);
}

#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
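/* Illustrative usage sketch (not part of the original header): _MM_SHUFFLE
   builds the 8-bit immediate consumed by _mm_shuffle_ps and _mm_shuffle_pi16,
   with the arguments selecting source elements from the highest result
   position down to the lowest, e.g.:

     __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
     __m128 r = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3)); // r = {4, 3, 2, 1}
*/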

#define _MM_MASK_MASK (0x1f80)
#define _MM_EXCEPT_MASK (0x003f)
#define _MM_FLUSH_MASK (0x8000)
#define _MM_ROUND_MASK (0x6000)

#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_MASK)
#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)

#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_MASK) | (x)))
#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
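/* Illustrative usage sketch (not part of the original header): the getter and
   setter macros above read-modify-write the MXCSR register, e.g.:

     _MM_SET_EXCEPTION_MASK(_MM_MASK_MASK);     // mask all SSE FP exceptions
     unsigned int rm = _MM_GET_ROUNDING_MODE(); // rounding-mode bits (13-14) of MXCSR
*/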

#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __m128 tmp3, tmp2, tmp1, tmp0; \
  tmp0 = _mm_unpacklo_ps((row0), (row1)); \
  tmp2 = _mm_unpacklo_ps((row2), (row3)); \
  tmp1 = _mm_unpackhi_ps((row0), (row1)); \
  tmp3 = _mm_unpackhi_ps((row2), (row3)); \
  (row0) = _mm_movelh_ps(tmp0, tmp2); \
  (row1) = _mm_movehl_ps(tmp2, tmp0); \
  (row2) = _mm_movelh_ps(tmp1, tmp3); \
  (row3) = _mm_movehl_ps(tmp3, tmp1); \
} while (0)
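/* Illustrative usage sketch (not part of the original header): transposing a
   4x4 matrix held in four row registers, in place:

     __m128 r0 = _mm_setr_ps( 1.0f,  2.0f,  3.0f,  4.0f);
     __m128 r1 = _mm_setr_ps( 5.0f,  6.0f,  7.0f,  8.0f);
     __m128 r2 = _mm_setr_ps( 9.0f, 10.0f, 11.0f, 12.0f);
     __m128 r3 = _mm_setr_ps(13.0f, 14.0f, 15.0f, 16.0f);
     _MM_TRANSPOSE4_PS(r0, r1, r2, r3); // r0 is now {1, 5, 9, 13}, and so on
*/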

#endif /* __SSE__ */

#endif /* __XMMINTRIN_H */