X86: add more GATHER intrinsics in Clang
Corrected the index type of _mm256_mask_i32gather_pd from 256-bit to 128-bit.
Corrected the src/dst/mask types of _mm256_mask_i64gather_ps from 256-bit to 128-bit.
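A minimal usage sketch (not part of the patch) showing the corrected prototypes; the wrapper functions, parameter names, and scale values below are illustrative only:

    #include <immintrin.h>

    /* 4 doubles gathered with 4 x 32-bit indices: the index vector is 128-bit. */
    static __m256d gather_pd_sketch(const double *base, __m128i idx32,
                                    __m256d src, __m256d mask) {
      return _mm256_mask_i32gather_pd(src, base, idx32, mask, 8);
    }

    /* 4 floats gathered with 4 x 64-bit indices: src, mask, and the result
       are 128-bit even though the index vector is 256-bit. */
    static __m128 gather_ps_sketch(const float *base, __m256i idx64,
                                   __m128 src, __m128 mask) {
      return _mm256_mask_i64gather_ps(src, base, idx64, mask, 4);
    }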
Support the following intrinsics (a usage sketch follows the list):
_mm_mask_i32gather_epi64, _mm256_mask_i32gather_epi64,
_mm_mask_i64gather_epi64, _mm256_mask_i64gather_epi64,
_mm_mask_i32gather_epi32, _mm256_mask_i32gather_epi32,
_mm_mask_i64gather_epi32, _mm256_mask_i64gather_epi32
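A similar sketch (again not part of the patch, with illustrative names) for two of the new integer gathers; lanes whose mask element has its sign bit set are loaded from memory, the remaining lanes are taken from src:

    #include <immintrin.h>

    /* 8 x 32-bit elements gathered with 8 x 32-bit indices. */
    static __m256i gather_d_d256_sketch(const int *base, __m256i idx32,
                                        __m256i src, __m256i mask) {
      return _mm256_mask_i32gather_epi32(src, base, idx32, mask, 4);
    }

    /* 4 x 32-bit elements gathered with 4 x 64-bit indices: src, mask, and
       the result are 128-bit while the index vector is 256-bit. */
    static __m128i gather_q_d256_sketch(const int *base, __m256i idx64,
                                        __m128i src, __m128i mask) {
      return _mm256_mask_i64gather_epi32(src, base, idx64, mask, 4);
    }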
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@159403 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def
index a331cde..4e8d13c 100644
--- a/include/clang/Basic/BuiltinsX86.def
+++ b/include/clang/Basic/BuiltinsX86.def
@@ -597,13 +597,22 @@
// GATHER
BUILTIN(__builtin_ia32_gatherd_pd, "V2dV2dV2dC*V4iV2dIc", "")
-BUILTIN(__builtin_ia32_gatherd_pd256, "V4dV4dV4dC*V8iV4dIc", "")
+BUILTIN(__builtin_ia32_gatherd_pd256, "V4dV4dV4dC*V4iV4dIc", "")
BUILTIN(__builtin_ia32_gatherq_pd, "V2dV2dV2dC*V2LLiV2dIc", "")
BUILTIN(__builtin_ia32_gatherq_pd256, "V4dV4dV4dC*V4LLiV4dIc", "")
BUILTIN(__builtin_ia32_gatherd_ps, "V4fV4fV4fC*V4iV4fIc", "")
BUILTIN(__builtin_ia32_gatherd_ps256, "V8fV8fV8fC*V8iV8fIc", "")
BUILTIN(__builtin_ia32_gatherq_ps, "V4fV4fV4fC*V2LLiV4fIc", "")
-BUILTIN(__builtin_ia32_gatherq_ps256, "V8fV8fV8fC*V4LLiV8fIc", "")
+BUILTIN(__builtin_ia32_gatherq_ps256, "V4fV4fV4fC*V4LLiV4fIc", "")
+
+BUILTIN(__builtin_ia32_gatherd_q, "V2LLiV2LLiV2LLiC*V4iV2LLiIc", "")
+BUILTIN(__builtin_ia32_gatherd_q256, "V4LLiV4LLiV4LLiC*V4iV4LLiIc", "")
+BUILTIN(__builtin_ia32_gatherq_q, "V2LLiV2LLiV2LLiC*V2LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_gatherq_q256, "V4LLiV4LLiV4LLiC*V4LLiV4LLiIc", "")
+BUILTIN(__builtin_ia32_gatherd_d, "V4iV4iV4iC*V4iV4iIc", "")
+BUILTIN(__builtin_ia32_gatherd_d256, "V8iV8iV8iC*V8iV8iIc", "")
+BUILTIN(__builtin_ia32_gatherq_d, "V4iV4iV4iC*V2LLiV4iIc", "")
+BUILTIN(__builtin_ia32_gatherq_d256, "V4iV4iV4iC*V4LLiV4iIc", "")
// BMI
BUILTIN(__builtin_ia32_bextr_u32, "UiUiUi", "")
diff --git a/lib/Headers/avx2intrin.h b/lib/Headers/avx2intrin.h
index 2a27bef..6878fbd 100644
--- a/lib/Headers/avx2intrin.h
+++ b/lib/Headers/avx2intrin.h
@@ -971,10 +971,10 @@
#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
__m256d __a = (a); \
double const *__m = (m); \
- __m256i __i = (i); \
+ __m128i __i = (i); \
__m256d __mask = (mask); \
(__m256d)__builtin_ia32_gatherd_pd256((__v4df)__a, (const __v4df *)__m, \
- (__v8si)__i, (__v4df)__mask, (s)); })
+ (__v4si)__i, (__v4df)__mask, (s)); })
#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
__m128d __a = (a); \
@@ -1017,9 +1017,73 @@
(__v2di)__i, (__v4sf)__mask, (s)); })
#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
- __m256 __a = (a); \
+ __m128 __a = (a); \
float const *__m = (m); \
__m256i __i = (i); \
- __m256 __mask = (mask); \
- (__m256)__builtin_ia32_gatherq_ps256((__v8sf)__a, (const __v8sf *)__m, \
- (__v4di)__i, (__v8sf)__mask, (s)); })
+ __m128 __mask = (mask); \
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)__a, (const __v4sf *)__m, \
+ (__v4di)__i, (__v4sf)__mask, (s)); })
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)__a, (const __v4si *)__m, \
+ (__v4si)__i, (__v4si)__mask, (s)); })
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m256i __a = (a); \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ __m256i __mask = (mask); \
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)__a, (const __v8si *)__m, \
+ (__v8si)__i, (__v8si)__mask, (s)); })
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)__a, (const __v4si *)__m, \
+ (__v2di)__i, (__v4si)__mask, (s)); })
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)__a, (const __v4si *)__m, \
+ (__v4di)__i, (__v4si)__mask, (s)); })
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \
+ (__v4si)__i, (__v2di)__mask, (s)); })
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m256i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m256i __mask = (mask); \
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \
+ (__v4si)__i, (__v4di)__mask, (s)); })
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m128i __a = (a); \
+ int const *__m = (m); \
+ __m128i __i = (i); \
+ __m128i __mask = (mask); \
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \
+ (__v2di)__i, (__v2di)__mask, (s)); })
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+ __m256i __a = (a); \
+ int const *__m = (m); \
+ __m256i __i = (i); \
+ __m256i __mask = (mask); \
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \
+ (__v4di)__i, (__v4di)__mask, (s)); })
diff --git a/test/CodeGen/avx2-builtins.c b/test/CodeGen/avx2-builtins.c
index c6953b3..86cc80a 100644
--- a/test/CodeGen/avx2-builtins.c
+++ b/test/CodeGen/avx2-builtins.c
@@ -10,6 +10,11 @@
return _mm256_mpsadbw_epu8(x, y, 3);
}
+__m256i test_mm256_sad_epu8(__m256i x, __m256i y) {
+ // CHECK: @llvm.x86.avx2.psad.bw
+ return _mm256_sad_epu8(x, y);
+}
+
__m256i test_mm256_abs_epi8(__m256i a) {
// CHECK: @llvm.x86.avx2.pabs.b
return _mm256_abs_epi8(a);
@@ -787,7 +792,7 @@
return _mm_mask_i32gather_pd(a, b, c, d, 2);
}
-__m256d test_mm256_mask_i32gather_pd(__m256d a, double const *b, __m256i c,
+__m256d test_mm256_mask_i32gather_pd(__m256d a, double const *b, __m128i c,
__m256d d) {
// CHECK: @llvm.x86.avx2.gather.d.pd.256
return _mm256_mask_i32gather_pd(a, b, c, d, 2);
@@ -818,8 +823,50 @@
// CHECK: @llvm.x86.avx2.gather.q.ps
return _mm_mask_i64gather_ps(a, b, c, d, 2);
}
-__m256 test_mm256_mask_i64gather_ps(__m256 a, float const *b, __m256i c,
- __m256 d) {
+__m128 test_mm256_mask_i64gather_ps(__m128 a, float const *b, __m256i c,
+ __m128 d) {
// CHECK: @llvm.x86.avx2.gather.q.ps.256
return _mm256_mask_i64gather_ps(a, b, c, d, 2);
}
+
+__m128i test_mm_mask_i32gather_epi32(__m128i a, int const *b, __m128i c,
+ __m128i d) {
+ // CHECK: @llvm.x86.avx2.gather.d.d
+ return _mm_mask_i32gather_epi32(a, b, c, d, 2);
+}
+__m256i test_mm256_mask_i32gather_epi32(__m256i a, int const *b, __m256i c,
+ __m256i d) {
+ // CHECK: @llvm.x86.avx2.gather.d.d.256
+ return _mm256_mask_i32gather_epi32(a, b, c, d, 2);
+}
+__m128i test_mm_mask_i64gather_epi32(__m128i a, int const *b, __m128i c,
+ __m128i d) {
+ // CHECK: @llvm.x86.avx2.gather.q.d
+ return _mm_mask_i64gather_epi32(a, b, c, d, 2);
+}
+__m128i test_mm256_mask_i64gather_epi32(__m128i a, int const *b, __m256i c,
+ __m128i d) {
+ // CHECK: @llvm.x86.avx2.gather.q.d.256
+ return _mm256_mask_i64gather_epi32(a, b, c, d, 2);
+}
+
+__m128i test_mm_mask_i32gather_epi64(__m128i a, int const *b, __m128i c,
+ __m128i d) {
+ // CHECK: @llvm.x86.avx2.gather.d.q
+ return _mm_mask_i32gather_epi64(a, b, c, d, 2);
+}
+__m256i test_mm256_mask_i32gather_epi64(__m256i a, int const *b, __m128i c,
+ __m256i d) {
+ // CHECK: @llvm.x86.avx2.gather.d.q.256
+ return _mm256_mask_i32gather_epi64(a, b, c, d, 2);
+}
+__m128i test_mm_mask_i64gather_epi64(__m128i a, int const *b, __m128i c,
+ __m128i d) {
+ // CHECK: @llvm.x86.avx2.gather.q.q
+ return _mm_mask_i64gather_epi64(a, b, c, d, 2);
+}
+__m256i test_mm256_mask_i64gather_epi64(__m256i a, int const *b, __m256i c,
+ __m256i d) {
+ // CHECK: @llvm.x86.avx2.gather.q.q.256
+ return _mm256_mask_i64gather_epi64(a, b, c, d, 2);
+}