/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else

#include <tmmintrin.h>

/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04

#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NO_EXC 0x08

#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)

#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)

#define _mm_round_ps(X, M) __extension__ ({ \
  __m128 __X = (X); \
  (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })

#define _mm_round_ss(X, Y, M) __extension__ ({ \
  __m128 __X = (X); \
  __m128 __Y = (Y); \
  (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })

#define _mm_round_pd(X, M) __extension__ ({ \
  __m128d __X = (X); \
  (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })

#define _mm_round_sd(X, Y, M) __extension__ ({ \
  __m128d __X = (X); \
  __m128d __Y = (Y); \
  (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
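
/* Usage sketch (illustrative, not part of the original header): the _mm_ceil_*
   and _mm_floor_* macros are just fixed-mode wrappers around _mm_round_*, and
   _MM_FROUND_NO_EXC suppresses the precision exception:

     __m128 v = _mm_set_ps(3.7f, -1.5f, 2.5f, 0.1f);
     __m128 f = _mm_floor_ps(v);   // lanes (low to high): {0.0, 2.0, -2.0, 3.0}
     __m128 t = _mm_round_ps(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
*/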

/* SSE4 Packed Blending Intrinsics. */
#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
  __m128d __V1 = (V1); \
  __m128d __V2 = (V2); \
  (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })

#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
  __m128 __V1 = (V1); \
  __m128 __V2 = (V2); \
  (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
                                            (__v2df)__M);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
                                           (__v4sf)__M);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
                                               (__v16qi)__M);
}

#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
  __m128i __V1 = (V1); \
  __m128i __V2 = (V2); \
  (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
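
/* Usage sketch (illustrative, not part of the original header): _mm_blend_*
   selects lanes with a compile-time immediate (bit i set takes lane i from the
   second operand), while _mm_blendv_* selects per lane at run time from the
   sign bit of a mask vector, e.g. one produced by a comparison:

     __m128 a   = _mm_set1_ps(1.0f), b = _mm_set1_ps(2.0f);
     __m128 lo2 = _mm_blend_ps(a, b, 0x3);   // lanes {2, 2, 1, 1}
     __m128 m   = _mm_cmplt_ps(a, b);        // all-ones mask here
     __m128 r   = _mm_blendv_ps(a, b, m);    // takes b in every lane
*/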

/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
  /* PMULLD: keep the low 32 bits of each of the four 32x32 products. */
  return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
  /* PMULDQ: signed multiply of lanes 0 and 2, yielding two 64-bit products. */
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
}
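
/* Usage sketch (illustrative, not part of the original header): the two
   multiplies differ in result width, so large products truncate in one and
   survive in the other:

     __m128i x = _mm_set_epi32(4, 3, 2, 100000);
     __m128i y = _mm_set_epi32(4, 3, 2, 100000);
     __m128i lo   = _mm_mullo_epi32(x, y);  // {1410065408, 4, 9, 16}, truncated
     __m128i wide = _mm_mul_epi32(x, y);    // 64-bit lanes {10000000000, 9}
*/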

/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __extension__ ({ \
  __m128 __X = (X); \
  __m128 __Y = (Y); \
  (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })

#define _mm_dp_pd(X, Y, M) __extension__ ({ \
  __m128d __X = (X); \
  __m128d __Y = (Y); \
  (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
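
/* Usage sketch (illustrative, not part of the original header): in the
   immediate, the high nibble selects which lanes enter the dot product and the
   low nibble selects which result lanes receive the sum; 0xFF uses all four
   lanes and broadcasts the result:

     __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
     __m128 b = _mm_set1_ps(1.0f);
     __m128 d = _mm_dp_ps(a, b, 0xFF);   // every lane holds 10.0f
*/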

/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}
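
/* Usage sketch (illustrative; `buf` and `i` are hypothetical): MOVNTDQA reads
   through the non-temporal hint path, which pays off when streaming a large
   buffer that should not displace cached data. The address must be 16-byte
   aligned:

     __m128i chunk = _mm_stream_load_si128((__m128i *)&buf[i]);
*/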

/* SSE4 Packed Integer Min/Max Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
}
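
/* Usage sketch (illustrative; `v`, `lo`, and `hi` are hypothetical vectors):
   SSE4.1 fills in the element types missing from SSE2 min/max (signed bytes,
   unsigned words, and both flavors of dwords). A common use is clamping each
   lane to a range:

     __m128i clamped = _mm_min_epi32(_mm_max_epi32(v, lo), hi);
*/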

/* SSE4 Insertion and Extraction from XMM Register Instructions. */
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__ \
                              ({ union { int __i; float __f; } __t; \
                                 __v4sf __a = (__v4sf)(X); \
                                 __t.__f = __a[(N) & 3]; \
                                 __t.__i;}))

/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
                                                     (D) = __a[N]; }))

/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
   an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
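
/* Usage sketch (illustrative; `x` and `y` are hypothetical vectors):
   _MM_MK_INSERTPS_NDX packs the INSERTPS immediate: source lane in bits 7:6,
   destination lane in bits 5:4, zero mask in bits 3:0. Copying lane 2 of y
   into lane 0 of x, zeroing nothing:

     __m128 r = _mm_insert_ps(x, y, _MM_MK_INSERTPS_NDX(2, 0, 0x0));
*/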

/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                   __a[(N) & 15] = (I); \
                                                   __a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                    __a[(N) & 3] = (I); \
                                                    __a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                    __a[(N) & 1] = (I); \
                                                    __a;}))
#endif /* __x86_64__ */

/* Extract int from packed integer array at index. This returns the element
 * as a zero-extended value, so it is unsigned.
 */
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                 (int)(unsigned char) \
                                                 __a[(N) & 15];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                  __a[(N) & 3];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                  __a[(N) & 1];}))
#endif /* __x86_64__ */
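
/* Usage sketch (illustrative, not part of the original header): the index is
   taken modulo the element count, and byte extraction is zero-extended into
   an int:

     __m128i v  = _mm_set_epi32(4, 3, 2, 1);
     int lane1  = _mm_extract_epi32(v, 1);      // 2
     __m128i v2 = _mm_insert_epi32(v, 99, 3);   // lane 3 becomes 99
*/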

/* SSE4 128-bit Packed Integer Comparisons. */
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testz_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}

#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
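
/* Usage sketch (illustrative; `mask` and `v` are hypothetical vectors): PTEST
   computes ZF from (V & M) and CF from (V & ~M), which the helpers above turn
   into common idioms:

     int none = _mm_test_all_zeros(mask, v);   // 1 if (v & mask) == 0
     int ones = _mm_test_all_ones(v);          // 1 if every bit of v is set
*/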

/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}

/* SSE4 Packed Integer Sign-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
}

/* SSE4 Packed Integer Zero-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
}
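
/* Usage sketch (illustrative, not part of the original header): the cvtep*
   family widens the low elements of the source, replacing the SSE2
   unpack-and-shift idiom. Sign extension preserves negative values; zero
   extension treats the same bits as unsigned:

     __m128i bytes = _mm_set1_epi8(-1);
     __m128i s16 = _mm_cvtepi8_epi16(bytes);   // eight words of -1
     __m128i u16 = _mm_cvtepu8_epi16(bytes);   // eight words of 255
*/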

/* SSE4 Pack with Unsigned Saturation. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
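
/* Usage sketch (illustrative, not part of the original header): PACKUSDW
   narrows eight signed dwords (four from each operand) to unsigned words,
   saturating each to [0, 65535]:

     __m128i a = _mm_set_epi32(70000, -5, 300, 7);
     __m128i w = _mm_packus_epi32(a, a);   // low four words {7, 300, 0, 65535}
*/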

/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
  __m128i __X = (X); \
  __m128i __Y = (Y); \
  (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_minpos_epu16(__m128i __V)
{
  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
}
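
/* Usage sketch (illustrative; `sads` is a hypothetical vector of eight SAD
   values, e.g. from _mm_mpsadbw_epu8): PHMINPOSUW returns the smallest
   unsigned word in lane 0 and its index in lane 1, a natural final reduction
   for a motion-estimation search:

     __m128i r   = _mm_minpos_epu16(sads);
     int min_sad = _mm_extract_epi16(r, 0);
     int min_idx = _mm_extract_epi16(r, 1);
*/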

/* These definitions are normally in nmmintrin.h, but gcc puts them in here
   so we'll do the same. */
#ifdef __SSE4_2__

/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS 0x00
#define _SIDD_UWORD_OPS 0x01
#define _SIDD_SBYTE_OPS 0x02
#define _SIDD_SWORD_OPS 0x03

/* These specify the type of comparison operation. */
#define _SIDD_CMP_EQUAL_ANY 0x00
#define _SIDD_CMP_RANGES 0x04
#define _SIDD_CMP_EQUAL_EACH 0x08
#define _SIDD_CMP_EQUAL_ORDERED 0x0c

/* These macros specify the polarity of the operation. */
#define _SIDD_POSITIVE_POLARITY 0x00
#define _SIDD_NEGATIVE_POLARITY 0x10
#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30

/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_LEAST_SIGNIFICANT 0x00
#define _SIDD_MOST_SIGNIFICANT 0x40

/* These macros are used in _mm_cmpXstrm() to specify the return. */
#define _SIDD_BIT_MASK 0x00
#define _SIDD_UNIT_MASK 0x40

/* SSE4.2 Packed Comparison Intrinsics. */
#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))

#define _mm_cmpestrm(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
#define _mm_cmpestri(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))

/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
#define _mm_cmpistra(A, B, M) \
  __builtin_ia32_pcmpistria128((A), (B), (M))
#define _mm_cmpistrc(A, B, M) \
  __builtin_ia32_pcmpistric128((A), (B), (M))
#define _mm_cmpistro(A, B, M) \
  __builtin_ia32_pcmpistrio128((A), (B), (M))
#define _mm_cmpistrs(A, B, M) \
  __builtin_ia32_pcmpistris128((A), (B), (M))
#define _mm_cmpistrz(A, B, M) \
  __builtin_ia32_pcmpistriz128((A), (B), (M))

#define _mm_cmpestra(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrc(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
#define _mm_cmpestro(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrs(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrz(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
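
/* Usage sketch (illustrative; `needles` and `chunk` are hypothetical 16-byte
   vectors): with UBYTE_OPS and EQUAL_ANY, _mm_cmpistri treats the first
   operand as a NUL-terminated character set and returns the index of the
   first byte of the second operand found in that set, or 16 if none matches:

     const int mode = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                      _SIDD_LEAST_SIGNIFICANT;
     int idx = _mm_cmpistri(needles, chunk, mode);
     // idx < 16 means a match at byte `idx` of `chunk`
*/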

/* SSE4.2 Compare Packed Data -- Greater Than. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}

/* SSE4.2 Accumulate CRC32. */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u8(unsigned int __C, unsigned char __D)
{
  return __builtin_ia32_crc32qi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u16(unsigned int __C, unsigned short __D)
{
  return __builtin_ia32_crc32hi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u32(unsigned int __C, unsigned int __D)
{
  return __builtin_ia32_crc32si(__C, __D);
}

#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
  return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */
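
/* Usage sketch (illustrative; `buf` and `len` are hypothetical): these
   intrinsics accumulate CRC-32C (the Castagnoli polynomial used by iSCSI and
   ext4), not the zlib CRC-32. One common convention initializes the running
   value to 0xFFFFFFFF and complements the result:

     unsigned int crc = 0xFFFFFFFFu;
     for (size_t i = 0; i < len; ++i)
       crc = _mm_crc32_u8(crc, buf[i]);
     crc = ~crc;
*/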

#ifdef __POPCNT__
#include <popcntintrin.h>
#endif

#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H */