SSE2/SSE4.1/AVX FP16->FP32 VCVT microkernels

Add int16- and int32-variant F16->F32 conversion microkernels for SSE2,
SSE4.1, and AVX at batch tiles of 8, 16, 24, and 32 elements; wire them
into the Bazel and CMake source lists and the generator script; and fix
the AVX512SKX template to scale per-register load/store offsets by the
vector width (N * 16 elements) instead of using the raw register index N.
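For reference, a minimal scalar sketch of the bit manipulation that the
sse-int16/sse-int32 templates vectorize (names here are illustrative,
not part of the library API):

  #include <stdint.h>
  #include <string.h>

  static uint32_t fp32_to_bits(float f) {
    uint32_t w;
    memcpy(&w, &f, sizeof(w));
    return w;
  }

  static float fp32_from_bits(uint32_t w) {
    float f;
    memcpy(&f, &w, sizeof(f));
    return f;
  }

  static float fp16_to_fp32_scalar(uint16_t h) {
    const uint32_t w = (uint32_t) h << 16;           // half bits, high-aligned
    const uint32_t sign = w & UINT32_C(0x80000000);  // vsign_mask
    const uint32_t nonsign = w ^ sign;               // exponent + mantissa

    // Normal inputs: move exponent/mantissa into FP32 position (>> 3 of the
    // high-aligned word == << 13 of the raw bits), rebias 15 -> 127 by
    // adding 112 << 23 (vexp_offset) and scaling by 0x1.0p-112f (vexp_scale).
    const float norm =
        fp32_from_bits((nonsign >> 3) + UINT32_C(0x70000000)) * 0x1.0p-112f;

    // Denormal inputs: place the mantissa bits inside a float with
    // exponent -1 (vmagic_mask = 0x3F000000 == 126 << 23), then subtract
    // the implied 0.5 (vmagic_bias) so only mantissa * 2^-24 remains.
    const float denorm =
        fp32_from_bits((nonsign >> 16) | UINT32_C(0x3F000000)) - 0.5f;

    // vdenorm_cutoff: a zero exponent field (and the exact smallest normal,
    // 0x0400) takes the denormal path; the magic-bias formula is still
    // exact there.
    const float value = nonsign > UINT32_C(0x04000000) ? norm : denorm;
    return fp32_from_bits(sign | fp32_to_bits(value));
  }

The int16 variants build the same 32-bit normalized words without
widening first: vprenorm_lo (nonsign << 13) and vprenorm_hi
((nonsign >> 3) + 0x7000) hold the low and high halves of the bit
pattern, and _mm_unpacklo_epi16/_mm_unpackhi_epi16 interleave them, so
each 128-bit shift covers eight elements instead of four.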
PiperOrigin-RevId: 403670135
diff --git a/BUILD.bazel b/BUILD.bazel
index ec8ae08..bac7213 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -3685,6 +3685,14 @@
]
ALL_SSE2_MICROKERNEL_SRCS = [
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int16-x8.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int16-x16.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int16-x24.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int16-x32.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int32-x8.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int32-x16.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int32-x24.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse2-int32-x32.c",
"src/f32-argmaxpool/4x-sse2-c4.c",
"src/f32-argmaxpool/9p8x-sse2-c4.c",
"src/f32-argmaxpool/9x-sse2-c4.c",
@@ -4050,6 +4058,14 @@
]
ALL_SSE41_MICROKERNEL_SRCS = [
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int16-x8.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int16-x16.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int16-x24.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int16-x32.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int32-x8.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int32-x16.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int32-x24.c",
+ "src/f16-f32-vcvt/gen/vcvt-sse41-int32-x32.c",
"src/f32-prelu/gen/sse41-2x4.c",
"src/f32-prelu/gen/sse41-2x8.c",
"src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c",
@@ -4360,6 +4376,14 @@
]
ALL_AVX_MICROKERNEL_SRCS = [
+ "src/f16-f32-vcvt/gen/vcvt-avx-int16-x8.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int16-x24.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int16-x32.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int32-x8.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int32-x16.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int32-x24.c",
+ "src/f16-f32-vcvt/gen/vcvt-avx-int32-x32.c",
"src/f32-dwconv/gen/up8x4-minmax-avx-acc2.c",
"src/f32-dwconv/gen/up8x4-minmax-avx.c",
"src/f32-dwconv/gen/up8x9-minmax-avx-acc2.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0f8df27..57678c9 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2727,6 +2727,14 @@
src/xx-pad/sse2.c)
SET(ALL_SSE2_MICROKERNEL_SRCS
+ src/f16-f32-vcvt/gen/vcvt-sse2-int16-x8.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int16-x16.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int16-x24.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int16-x32.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int32-x8.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int32-x16.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int32-x24.c
+ src/f16-f32-vcvt/gen/vcvt-sse2-int32-x32.c
src/f32-argmaxpool/4x-sse2-c4.c
src/f32-argmaxpool/9p8x-sse2-c4.c
src/f32-argmaxpool/9x-sse2-c4.c
@@ -3088,6 +3096,14 @@
src/s8-vclamp/sse41-x64.c)
SET(ALL_SSE41_MICROKERNEL_SRCS
+ src/f16-f32-vcvt/gen/vcvt-sse41-int16-x8.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int16-x16.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int16-x24.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int16-x32.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int32-x8.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int32-x16.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int32-x24.c
+ src/f16-f32-vcvt/gen/vcvt-sse41-int32-x32.c
src/f32-prelu/gen/sse41-2x4.c
src/f32-prelu/gen/sse41-2x8.c
src/f32-velu/gen/velu-sse41-rr2-lut16-p3-x4.c
@@ -3396,6 +3412,14 @@
src/x8-lut/gen/lut-avx-x64.c)
SET(ALL_AVX_MICROKERNEL_SRCS
+ src/f16-f32-vcvt/gen/vcvt-avx-int16-x8.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int16-x24.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int16-x32.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int32-x8.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int32-x16.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int32-x24.c
+ src/f16-f32-vcvt/gen/vcvt-avx-int32-x32.c
src/f32-dwconv/gen/up8x4-minmax-avx-acc2.c
src/f32-dwconv/gen/up8x4-minmax-avx.c
src/f32-dwconv/gen/up8x9-minmax-avx-acc2.c
diff --git a/scripts/generate-f16-f32-vcvt.sh b/scripts/generate-f16-f32-vcvt.sh
index 35a39f2..e276b04 100755
--- a/scripts/generate-f16-f32-vcvt.sh
+++ b/scripts/generate-f16-f32-vcvt.sh
@@ -8,6 +8,37 @@
tools/xngen src/f16-f32-vcvt/neonfp16.c.in -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-neonfp16-x8.c &
tools/xngen src/f16-f32-vcvt/neonfp16.c.in -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-neonfp16-x16.c &
+################################# x86 128-bit #################################
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-sse2-int16-x8.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-sse2-int16-x16.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=24 -o src/f16-f32-vcvt/gen/vcvt-sse2-int16-x24.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=32 -o src/f16-f32-vcvt/gen/vcvt-sse2-int16-x32.c &
+
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-sse2-int32-x8.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-sse2-int32-x16.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=24 -o src/f16-f32-vcvt/gen/vcvt-sse2-int32-x24.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=2 -D AVX=0 -D BATCH_TILE=32 -o src/f16-f32-vcvt/gen/vcvt-sse2-int32-x32.c &
+
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-sse41-int16-x8.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-sse41-int16-x16.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=24 -o src/f16-f32-vcvt/gen/vcvt-sse41-int16-x24.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=32 -o src/f16-f32-vcvt/gen/vcvt-sse41-int16-x32.c &
+
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-sse41-int32-x8.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-sse41-int32-x16.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=24 -o src/f16-f32-vcvt/gen/vcvt-sse41-int32-x24.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=0 -D BATCH_TILE=32 -o src/f16-f32-vcvt/gen/vcvt-sse41-int32-x32.c &
+
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-avx-int16-x8.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=24 -o src/f16-f32-vcvt/gen/vcvt-avx-int16-x24.c &
+tools/xngen src/f16-f32-vcvt/sse-int16.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=32 -o src/f16-f32-vcvt/gen/vcvt-avx-int16-x32.c &
+
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-avx-int32-x8.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-avx-int32-x16.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=24 -o src/f16-f32-vcvt/gen/vcvt-avx-int32-x24.c &
+tools/xngen src/f16-f32-vcvt/sse-int32.c.in -D SSE=4 -D AVX=1 -D BATCH_TILE=32 -o src/f16-f32-vcvt/gen/vcvt-avx-int32-x32.c &
+
################################# x86 256-bit #################################
tools/xngen src/f16-f32-vcvt/f16c.c.in -D BATCH_TILE=8 -o src/f16-f32-vcvt/gen/vcvt-f16c-x8.c &
tools/xngen src/f16-f32-vcvt/f16c.c.in -D BATCH_TILE=16 -o src/f16-f32-vcvt/gen/vcvt-f16c-x16.c &
diff --git a/src/f16-f32-vcvt/avx512skx.c.in b/src/f16-f32-vcvt/avx512skx.c.in
index 307472e..7903184 100644
--- a/src/f16-f32-vcvt/avx512skx.c.in
+++ b/src/f16-f32-vcvt/avx512skx.c.in
@@ -32,12 +32,12 @@
for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
const __m512 vacc${ABC[0]} = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) i));
$for N in range(1, SIMD_TILE):
- const __m512 vacc${ABC[N]} = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) (i + ${N})));
+ const __m512 vacc${ABC[N]} = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) (i + ${N * 16})));
i += ${BATCH_TILE};
_mm512_storeu_ps(output, vacc${ABC[0]});
$for N in range(1, SIMD_TILE):
- _mm512_storeu_ps(output + ${N}, vacc${ABC[N]});
+ _mm512_storeu_ps(output + ${N * 16}, vacc${ABC[N]});
output += ${BATCH_TILE};
}
for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c
new file mode 100644
index 0000000..b8f3835
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c
@@ -0,0 +1,148 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int16_x16(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ i += 16;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ output += 16;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int16-x24.c b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x24.c
new file mode 100644
index 0000000..c66a41e
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x24.c
@@ -0,0 +1,164 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int16_x24(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ i += 24;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+ const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
+ const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
+ const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2)));
+ const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ output += 24;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int16-x32.c b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x32.c
new file mode 100644
index 0000000..a87e568
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x32.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int16_x32(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
+ i += 32;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+ const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
+ const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
+ const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13);
+ const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale));
+ const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
+ const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2)));
+ const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2)));
+ const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3),
+ _mm_blendv_epi8(vdenorm6, vnorm6, _mm_cvtepi16_epi32(vmask3)));
+ const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3),
+ _mm_blendv_epi8(vdenorm7, vnorm7, _mm_unpackhi_epi16(vmask3, vmask3)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
+ _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
+ output += 32;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int16-x8.c b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x8.c
new file mode 100644
index 0000000..5640dae
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int16-x8.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int16_x8(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int32-x16.c b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x16.c
new file mode 100644
index 0000000..1ce08cc
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x16.c
@@ -0,0 +1,150 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int32_x16(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ i += 16;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
+ const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
+ const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
+ const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ output += 16;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int32-x24.c b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x24.c
new file mode 100644
index 0000000..f3cb237
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x24.c
@@ -0,0 +1,167 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int32_x24(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ i += 24;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+ const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
+ const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+ const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
+ const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+ const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
+ const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
+ const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
+ const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
+ const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
+ const __m128i vf4 = _mm_or_si128(vsign4, _mm_blendv_epi8(vdenorm4, vnorm4, vmask4));
+ const __m128i vf5 = _mm_or_si128(vsign5, _mm_blendv_epi8(vdenorm5, vnorm5, vmask5));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ output += 24;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int32-x32.c b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x32.c
new file mode 100644
index 0000000..bedef35
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x32.c
@@ -0,0 +1,184 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int32_x32(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
+ i += 32;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw6 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh3);
+ const __m128i vw7 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh3);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+ const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
+ const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
+ const __m128i vsign6 = _mm_and_si128(vw6, vsign_mask);
+ const __m128i vsign7 = _mm_and_si128(vw7, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+ const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
+ const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
+ const __m128i vnonsign6 = _mm_xor_si128(vw6, vsign6);
+ const __m128i vnonsign7 = _mm_xor_si128(vw7, vsign7);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign6, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign7, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign6, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign7, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+ const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
+ const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
+ const __m128i vmask6 = _mm_cmpgt_epi32(vnonsign6, vdenorm_cutoff);
+ const __m128i vmask7 = _mm_cmpgt_epi32(vnonsign7, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
+ const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
+ const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
+ const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
+ const __m128i vf4 = _mm_or_si128(vsign4, _mm_blendv_epi8(vdenorm4, vnorm4, vmask4));
+ const __m128i vf5 = _mm_or_si128(vsign5, _mm_blendv_epi8(vdenorm5, vnorm5, vmask5));
+ const __m128i vf6 = _mm_or_si128(vsign6, _mm_blendv_epi8(vdenorm6, vnorm6, vmask6));
+ const __m128i vf7 = _mm_or_si128(vsign7, _mm_blendv_epi8(vdenorm7, vnorm7, vmask7));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
+ _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
+ output += 32;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
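
For readers tracing the int32 template, the following is a minimal scalar model of what each 32-bit lane above computes. It is a sketch, not part of the diff: the function name is ours, but the constants and data flow mirror the intrinsics.

    #include <stdint.h>
    #include <string.h>

    // One lane of the sse-int32 template (illustrative; name is ours).
    static float fp16_to_fp32_int32_model(uint16_t h) {
      const uint32_t w = (uint32_t) h << 16;           // unpack(zero, vh)
      const uint32_t sign = w & UINT32_C(0x80000000);  // vsign
      const uint32_t nonsign = w ^ sign;               // exponent + mantissa bits

      // Normal path: align the fields (>> 3), add the exponent offset
      // 0x70000000, then cancel the extra +112 with a 2**-112 multiply.
      uint32_t bits = (nonsign >> 3) + UINT32_C(0x70000000);
      float norm;
      memcpy(&norm, &bits, sizeof(norm));
      norm *= 0x1.0p-112f;

      // Denormal path: park the mantissa behind the exponent of 0.5f
      // (0x3F000000) and subtract the magic bias back out.
      bits = (nonsign >> 16) | UINT32_C(0x3F000000);
      float denorm;
      memcpy(&denorm, &bits, sizeof(denorm));
      denorm -= 0.5f;

      // vdenorm_cutoff is the smallest normal FP16, pre-shifted: lanes
      // strictly above it take the normal path. The signed compare is safe
      // because nonsign never exceeds 0x7FFF0000.
      float f = ((int32_t) nonsign > (int32_t) 0x04000000) ? norm : denorm;

      memcpy(&bits, &f, sizeof(bits));
      bits |= sign;                                    // reattach the sign
      memcpy(&f, &bits, sizeof(f));
      return f;
    }

Infinities and NaNs ride the normal path: a biased FP16 exponent of 31 becomes 0xFF after the offset and scale, so payloads survive. The boundary value 0x0400 itself takes the denormal path, which still yields the exact 2**-14.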
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx-int32-x8.c b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x8.c
new file mode 100644
index 0000000..2f8457d
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-avx-int32-x8.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__avx_int32_x8(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
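
The x8 kernel is the base case of each family: the wider x16/x24/x32 variants differ only in how far the main loop is unrolled, then fall through to this same 8-element loop and shared tail for leftovers, so the tools/xngen template only has to vary the unroll factor.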
diff --git a/src/f16-f32-vcvt/gen/vcvt-avx512skx-x32.c b/src/f16-f32-vcvt/gen/vcvt-avx512skx-x32.c
index 41708e0..84885f2 100644
--- a/src/f16-f32-vcvt/gen/vcvt-avx512skx-x32.c
+++ b/src/f16-f32-vcvt/gen/vcvt-avx512skx-x32.c
@@ -30,11 +30,11 @@
const uint16_t* i = (const uint16_t*) input;
for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
const __m512 vacc0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) i));
- const __m512 vacc1 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) (i + 1)));
+ const __m512 vacc1 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*) (i + 16)));
i += 32;
_mm512_storeu_ps(output, vacc0);
- _mm512_storeu_ps(output + 1, vacc1);
+ _mm512_storeu_ps(output + 16, vacc1);
output += 32;
}
for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
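
Unlike the new files, this hunk fixes an existing kernel: i points at uint16_t half-precision inputs and output at float, so the second 16-lane block lives at i + 16 and output + 16. The old + 1 offsets advanced by a single element instead of a 16-element block, so the second conversion merely re-covered elements 1-16 and left outputs 17-31 of every 32-element chunk unwritten.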
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x16.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x16.c
new file mode 100644
index 0000000..20d1d19
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x16.c
@@ -0,0 +1,156 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int16_x16(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ i += 16;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+
+ const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
+ const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
+ const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
+ const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ output += 16;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
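
The int16 template reaches the same bit patterns while keeping the shifts in 16-bit lanes: the two halves of the shifted mantissa are computed separately (vprenorm_lo/vprenorm_hi) and interleaved into 32-bit words only at the end. A minimal scalar model, again with names of our choosing:

    #include <stdint.h>
    #include <string.h>

    // One element of the sse-int16 template (illustrative; name is ours).
    static float fp16_to_fp32_int16_model(uint16_t h) {
      const uint16_t sign = h & UINT16_C(0x8000);
      const uint16_t nonsign = h ^ sign;

      // vprenorm_lo/vprenorm_hi: low and high 16-bit halves of
      // (nonsign << 13), with the exponent offset 0x7000 folded into the
      // high half.
      const uint16_t prenorm_lo = (uint16_t) (nonsign << 13);
      const uint16_t prenorm_hi = (uint16_t) ((nonsign >> 3) + UINT16_C(0x7000));

      uint32_t bits = ((uint32_t) prenorm_hi << 16) | prenorm_lo;  // unpack
      float norm;
      memcpy(&norm, &bits, sizeof(norm));
      norm *= 0x1.0p-112f;                  // cancel the exponent offset

      // unpack(nonsign, 0x3F00) assembles 0x3F000000 | nonsign.
      bits = (UINT32_C(0x3F00) << 16) | nonsign;
      float denorm;
      memcpy(&denorm, &bits, sizeof(denorm));
      denorm -= 0.5f;

      // One 16-bit compare serves both output lanes; the kernels widen the
      // mask by unpacking it with itself before the select.
      float f = ((int16_t) nonsign > (int16_t) 0x0400) ? norm : denorm;

      memcpy(&bits, &f, sizeof(bits));
      bits |= (uint32_t) sign << 16;        // sign lands in bit 31
      memcpy(&f, &bits, sizeof(f));
      return f;
    }

The payoff over the int32 variant is fewer 32-bit shifts per element; the cost is the extra unpacks needed to widen the 16-bit compare mask before the select.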
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x24.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x24.c
new file mode 100644
index 0000000..3bdea94
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x24.c
@@ -0,0 +1,174 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int16_x24(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ i += 24;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+ const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
+ const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
+
+ const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
+ const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
+ const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
+ const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));
+ const __m128i vxmask4 = _mm_unpacklo_epi16(vmask2, vmask2);
+ const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
+ _mm_or_si128(_mm_and_si128(vxmask4, vnorm4), _mm_andnot_si128(vxmask4, vdenorm4)));
+ const __m128i vxmask5 = _mm_unpackhi_epi16(vmask2, vmask2);
+ const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
+ _mm_or_si128(_mm_and_si128(vxmask5, vnorm5), _mm_andnot_si128(vxmask5, vdenorm5)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ output += 24;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x32.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x32.c
new file mode 100644
index 0000000..b37bf0b
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x32.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int16_x32(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
+ i += 32;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+ const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
+ const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
+ const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13);
+ const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale));
+ const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff);
+
+ const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
+ const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
+ const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
+ const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));
+ const __m128i vxmask4 = _mm_unpacklo_epi16(vmask2, vmask2);
+ const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
+ _mm_or_si128(_mm_and_si128(vxmask4, vnorm4), _mm_andnot_si128(vxmask4, vdenorm4)));
+ const __m128i vxmask5 = _mm_unpackhi_epi16(vmask2, vmask2);
+ const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
+ _mm_or_si128(_mm_and_si128(vxmask5, vnorm5), _mm_andnot_si128(vxmask5, vdenorm5)));
+ const __m128i vxmask6 = _mm_unpacklo_epi16(vmask3, vmask3);
+ const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3),
+ _mm_or_si128(_mm_and_si128(vxmask6, vnorm6), _mm_andnot_si128(vxmask6, vdenorm6)));
+ const __m128i vxmask7 = _mm_unpackhi_epi16(vmask3, vmask3);
+ const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3),
+ _mm_or_si128(_mm_and_si128(vxmask7, vnorm7), _mm_andnot_si128(vxmask7, vdenorm7)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
+ _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
+ output += 32;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
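
Seen side by side, the int16 constants are the int32 constants with the low 16 bits dropped, since each one only ever acts on the high half of the eventual 32-bit word: 0x8000/0x80000000 is the sign bit; 0x7000/0x70000000 is the exponent rebias (127 - 15 + 112) << 23, whose extra +112 the 0x1.0p-112f multiply cancels; 0x3F00/0x3F000000 is the bit pattern of 0.5f; and 0x0400/0x04000000 is the smallest normal FP16, pre-positioned for the cutoff compare.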
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x8.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x8.c
new file mode 100644
index 0000000..1b49887
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int16-x8.c
@@ -0,0 +1,108 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int16_x8(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
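
One concrete difference between the SSE2 kernels and their SSE4.1/AVX siblings is the final select. SSE2 has no vector blend, so the template spells it out with and/andnot/or; SSE4.1 and AVX get it in one instruction. A side-by-side sketch (helper names are ours):

    #include <emmintrin.h>   // SSE2
    #include <smmintrin.h>   // SSE4.1

    // mask lanes are all-ones (take a) or all-zeros (take b), exactly as
    // produced by _mm_cmpgt_epi16/_mm_cmpgt_epi32 in the kernels above.
    static inline __m128i select_sse2(__m128i mask, __m128i a, __m128i b) {
      return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
    }

    static inline __m128i select_sse41(__m128i mask, __m128i a, __m128i b) {
      return _mm_blendv_epi8(b, a, mask);  // takes a where mask byte MSBs are set
    }

In the int16 kernels the compare yields 16-bit lanes, so the mask must first be doubled up with _mm_unpacklo_epi16/_mm_unpackhi_epi16 before either form of select; the int32 kernels can feed the compare result straight in.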
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x16.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x16.c
new file mode 100644
index 0000000..79a04cb
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x16.c
@@ -0,0 +1,158 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int32_x16(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ i += 16;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0,
+ _mm_or_si128(_mm_and_si128(vmask0, vnorm0), _mm_andnot_si128(vmask0, vdenorm0)));
+ const __m128i vf1 = _mm_or_si128(vsign1,
+ _mm_or_si128(_mm_and_si128(vmask1, vnorm1), _mm_andnot_si128(vmask1, vdenorm1)));
+ const __m128i vf2 = _mm_or_si128(vsign2,
+ _mm_or_si128(_mm_and_si128(vmask2, vnorm2), _mm_andnot_si128(vmask2, vdenorm2)));
+ const __m128i vf3 = _mm_or_si128(vsign3,
+ _mm_or_si128(_mm_and_si128(vmask3, vnorm3), _mm_andnot_si128(vmask3, vdenorm3)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ output += 16;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x24.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x24.c
new file mode 100644
index 0000000..c46a65a
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x24.c
@@ -0,0 +1,177 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int32_x24(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ i += 24;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+ const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
+ const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+ const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
+ const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_mask)), vmagic_bias));
+
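+ // Lanes strictly above the denormal cutoff take the normalized result;
+ // the boundary value itself converts identically on either path.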
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+ const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
+ const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
+
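+ // SSE2 has no vector blend, so select with AND/ANDNOT/OR and merge the
+ // sign back in.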
+ const __m128i vf0 = _mm_or_si128(vsign0,
+ _mm_or_si128(_mm_and_si128(vmask0, vnorm0), _mm_andnot_si128(vmask0, vdenorm0)));
+ const __m128i vf1 = _mm_or_si128(vsign1,
+ _mm_or_si128(_mm_and_si128(vmask1, vnorm1), _mm_andnot_si128(vmask1, vdenorm1)));
+ const __m128i vf2 = _mm_or_si128(vsign2,
+ _mm_or_si128(_mm_and_si128(vmask2, vnorm2), _mm_andnot_si128(vmask2, vdenorm2)));
+ const __m128i vf3 = _mm_or_si128(vsign3,
+ _mm_or_si128(_mm_and_si128(vmask3, vnorm3), _mm_andnot_si128(vmask3, vdenorm3)));
+ const __m128i vf4 = _mm_or_si128(vsign4,
+ _mm_or_si128(_mm_and_si128(vmask4, vnorm4), _mm_andnot_si128(vmask4, vdenorm4)));
+ const __m128i vf5 = _mm_or_si128(vsign5,
+ _mm_or_si128(_mm_and_si128(vmask5, vnorm5), _mm_andnot_si128(vmask5, vdenorm5)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ output += 24;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
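+ // Tail: convert one more full vector, then store only the 1-7 remaining
+ // elements with progressively narrower stores.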
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x32.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x32.c
new file mode 100644
index 0000000..c900298
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x32.c
@@ -0,0 +1,196 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int32_x32(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
+ i += 32;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw6 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh3);
+ const __m128i vw7 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh3);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+ const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
+ const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
+ const __m128i vsign6 = _mm_and_si128(vw6, vsign_mask);
+ const __m128i vsign7 = _mm_and_si128(vw7, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+ const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
+ const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
+ const __m128i vnonsign6 = _mm_xor_si128(vw6, vsign6);
+ const __m128i vnonsign7 = _mm_xor_si128(vw7, vsign7);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign6, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign7, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign6, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign7, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+ const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
+ const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
+ const __m128i vmask6 = _mm_cmpgt_epi32(vnonsign6, vdenorm_cutoff);
+ const __m128i vmask7 = _mm_cmpgt_epi32(vnonsign7, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0,
+ _mm_or_si128(_mm_and_si128(vmask0, vnorm0), _mm_andnot_si128(vmask0, vdenorm0)));
+ const __m128i vf1 = _mm_or_si128(vsign1,
+ _mm_or_si128(_mm_and_si128(vmask1, vnorm1), _mm_andnot_si128(vmask1, vdenorm1)));
+ const __m128i vf2 = _mm_or_si128(vsign2,
+ _mm_or_si128(_mm_and_si128(vmask2, vnorm2), _mm_andnot_si128(vmask2, vdenorm2)));
+ const __m128i vf3 = _mm_or_si128(vsign3,
+ _mm_or_si128(_mm_and_si128(vmask3, vnorm3), _mm_andnot_si128(vmask3, vdenorm3)));
+ const __m128i vf4 = _mm_or_si128(vsign4,
+ _mm_or_si128(_mm_and_si128(vmask4, vnorm4), _mm_andnot_si128(vmask4, vdenorm4)));
+ const __m128i vf5 = _mm_or_si128(vsign5,
+ _mm_or_si128(_mm_and_si128(vmask5, vnorm5), _mm_andnot_si128(vmask5, vdenorm5)));
+ const __m128i vf6 = _mm_or_si128(vsign6,
+ _mm_or_si128(_mm_and_si128(vmask6, vnorm6), _mm_andnot_si128(vmask6, vdenorm6)));
+ const __m128i vf7 = _mm_or_si128(vsign7,
+ _mm_or_si128(_mm_and_si128(vmask7, vnorm7), _mm_andnot_si128(vmask7, vdenorm7)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
+ _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
+ output += 32;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x8.c b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x8.c
new file mode 100644
index 0000000..2df2612
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse2-int32-x8.c
@@ -0,0 +1,108 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse2_int32_x8(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x16.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x16.c
new file mode 100644
index 0000000..ca0fd81
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x16.c
@@ -0,0 +1,148 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int16_x16(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
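+ // Same constants as the int32 variants, but packed as 16-bit halves so
+ // most of the work stays in 16-bit lanes until the final widening.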
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ i += 16;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+
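+ // Build the two 16-bit halves of each normalized result: the low halfword
+ // is the mantissa shifted into place, the high halfword is the exponent
+ // offset added to the shifted exponent/upper-mantissa bits; interleaving
+ // them below forms 32-bit floats, which the 2**-112 scale then corrects.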
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+
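+ // Denormal path: pairing each half with the high halfword of 0.5f's bit
+ // pattern (0x3F00) forms the float 0.5 + mantissa * 2**-24; subtracting
+ // 0.5 recovers the denormal value.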
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+
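+ // Widen the 16-bit cutoff masks to 32-bit lanes (sign-extend the low
+ // halves, duplicate the high halves), blend the normalized and denormal
+ // results with _mm_blendv_epi8, and OR in the sign, widened into bit 31.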
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ output += 16;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x24.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x24.c
new file mode 100644
index 0000000..3014a61
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x24.c
@@ -0,0 +1,164 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int16_x24(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ i += 24;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+ const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
+ const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
+ const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2)));
+ const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ output += 24;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x32.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x32.c
new file mode 100644
index 0000000..d9c886a
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x32.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int16_x32(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
+ i += 32;
+
+ const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3);
+
+ const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
+ const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
+ const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
+ const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
+ const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
+ const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
+ const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13);
+ const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
+ const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale));
+ const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm0, vnorm0, _mm_cvtepi16_epi32(vmask0)));
+ const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
+ _mm_blendv_epi8(vdenorm1, vnorm1, _mm_unpackhi_epi16(vmask0, vmask0)));
+ const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm2, vnorm2, _mm_cvtepi16_epi32(vmask1)));
+ const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
+ _mm_blendv_epi8(vdenorm3, vnorm3, _mm_unpackhi_epi16(vmask1, vmask1)));
+ const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm4, vnorm4, _mm_cvtepi16_epi32(vmask2)));
+ const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
+ _mm_blendv_epi8(vdenorm5, vnorm5, _mm_unpackhi_epi16(vmask2, vmask2)));
+ const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3),
+ _mm_blendv_epi8(vdenorm6, vnorm6, _mm_cvtepi16_epi32(vmask3)));
+ const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3),
+ _mm_blendv_epi8(vdenorm7, vnorm7, _mm_unpackhi_epi16(vmask3, vmask3)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
+ _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
+ output += 32;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x8.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x8.c
new file mode 100644
index 0000000..f34993c
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int16-x8.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int16_x8(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x16.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x16.c
new file mode 100644
index 0000000..37bfc50
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x16.c
@@ -0,0 +1,150 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int32_x16(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ i += 16;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+
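+ // With SSE4.1 the normalized/denormal select collapses into a single
+ // _mm_blendv_epi8 per vector: the mask lanes are all-ones or all-zeros,
+ // so the byte-wise blend picks whole 32-bit lanes.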
+ const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
+ const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
+ const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
+ const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ output += 16;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
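For reference, the bit manipulation in these int32-variant kernels reduces to the following scalar logic per lane (a minimal sketch, not part of the patch; fp32_to_bits/fp32_from_bits are hypothetical memcpy-based helpers):

  #include <stdint.h>
  #include <string.h>

  static uint32_t fp32_to_bits(float f) { uint32_t w; memcpy(&w, &f, sizeof w); return w; }
  static float fp32_from_bits(uint32_t w) { float f; memcpy(&f, &w, sizeof f); return f; }

  static float f16_to_f32_sketch(uint16_t h) {
    const uint32_t w = (uint32_t) h << 16;            // like _mm_unpacklo_epi16(zero, vh)
    const uint32_t sign = w & UINT32_C(0x80000000);   // vsign_mask
    const uint32_t nonsign = w ^ sign;                // exponent + mantissa only
    // Normal/Inf/NaN path: >>3 drops the half fields into fp32 position; adding
    // 0x70000000 puts 224 into the exponent field, and the 0x1.0p-112f multiply
    // rebiases it (224 - 112 = 112 = 127 - 15) while carrying Inf/NaN through.
    const float vnorm = fp32_from_bits((nonsign >> 3) + UINT32_C(0x70000000)) * 0x1.0p-112f;
    // Denormal path: OR the raw half bits into 0.5f's pattern (0x3F000000) and
    // subtract the 0.5f magic bias, leaving mantissa * 2^-24, the value of a
    // denormal half.
    const float vdenorm = fp32_from_bits((nonsign >> 16) | UINT32_C(0x3F000000)) - 0.5f;
    // 0x04000000 is the smallest normal half (0x0400) pre-shifted into the high half.
    const float v = nonsign > UINT32_C(0x04000000) ? vnorm : vdenorm;
    return fp32_from_bits(fp32_to_bits(v) | sign);    // reattach the sign bit
  }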
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x24.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x24.c
new file mode 100644
index 0000000..e08fd67
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x24.c
@@ -0,0 +1,167 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int32_x24(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ i += 24;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+ const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
+ const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+ const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
+ const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+ const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
+ const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
+ const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
+ const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
+ const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
+ const __m128i vf4 = _mm_or_si128(vsign4, _mm_blendv_epi8(vdenorm4, vnorm4, vmask4));
+ const __m128i vf5 = _mm_or_si128(vsign5, _mm_blendv_epi8(vdenorm5, vnorm5, vmask5));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ output += 24;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
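A note on the remainder handling shared by all of these kernels: n counts bytes of f32 output (n = batch * sizeof(float)), so each kernel drains full 8-element vectors and then handles a 1-7 element tail with one last full 16-byte load followed by a decomposed store, which appears to assume the input stays readable through a whole vector at the end. For example, with 5 elements left, n = 20 bytes: n & 16 stores four floats and swaps in the high half, n & 8 is zero, and n & 4 stores the fifth element with _mm_store_ss.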
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x32.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x32.c
new file mode 100644
index 0000000..34bdb9f
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x32.c
@@ -0,0 +1,184 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int32_x32(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
+ const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
+ const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
+ i += 32;
+
+ const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
+ const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
+ const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
+ const __m128i vw6 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh3);
+ const __m128i vw7 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh3);
+
+ const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
+ const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
+ const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
+ const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
+ const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
+ const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
+ const __m128i vsign6 = _mm_and_si128(vw6, vsign_mask);
+ const __m128i vsign7 = _mm_and_si128(vw7, vsign_mask);
+
+ const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
+ const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
+ const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
+ const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
+ const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
+ const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
+ const __m128i vnonsign6 = _mm_xor_si128(vw6, vsign6);
+ const __m128i vnonsign7 = _mm_xor_si128(vw7, vsign7);
+
+ const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign6, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign7, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign6, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign7, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
+ const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
+ const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
+ const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
+ const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
+ const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
+ const __m128i vmask6 = _mm_cmpgt_epi32(vnonsign6, vdenorm_cutoff);
+ const __m128i vmask7 = _mm_cmpgt_epi32(vnonsign7, vdenorm_cutoff);
+
+ const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
+ const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
+ const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
+ const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
+ const __m128i vf4 = _mm_or_si128(vsign4, _mm_blendv_epi8(vdenorm4, vnorm4, vmask4));
+ const __m128i vf5 = _mm_or_si128(vsign5, _mm_blendv_epi8(vdenorm5, vnorm5, vmask5));
+ const __m128i vf6 = _mm_or_si128(vsign6, _mm_blendv_epi8(vdenorm6, vnorm6, vmask6));
+ const __m128i vf7 = _mm_or_si128(vsign7, _mm_blendv_epi8(vdenorm7, vnorm7, vmask7));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
+ _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
+ _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
+ _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
+ _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
+ _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
+ _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
+ output += 32;
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
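The x8/x16/x24/x32 suffixes are unroll factors of one and the same algorithm: the wider variants process up to four 8-half vectors per main-loop iteration, then fall through to the common 8-element loop and tail. Which width is fastest is microarchitecture-dependent, which is presumably why all four tile sizes are generated and exposed.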
diff --git a/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x8.c b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x8.c
new file mode 100644
index 0000000..931f577
--- /dev/null
+++ b/src/f16-f32-vcvt/gen/vcvt-sse41-int32-x8.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-f32-vcvt/sse-int32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+void xnn_f16_f32_vcvt_ukernel__sse41_int32_x8(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
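The generated files above come from the two templates that follow. Lines beginning with $ are xngen directives evaluated at generation time: BATCH_TILE selects the unroll factor (x8 through x32), SSE in [2, 4] plus the AVX flag select the ISA suffix and the intrinsics header, and the $for loops emit the numbered per-vector statements seen in the generated sources.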
diff --git a/src/f16-f32-vcvt/sse-int16.c.in b/src/f16-f32-vcvt/sse-int16.c.in
new file mode 100644
index 0000000..59a1511
--- /dev/null
+++ b/src/f16-f32-vcvt/sse-int16.c.in
@@ -0,0 +1,175 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert SSE in [2, 4]
+$assert not AVX or SSE == 4
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <${SSE_HEADER}>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+$ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
+void xnn_f16_f32_vcvt_ukernel__${ISA}_int16_x${BATCH_TILE}(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi16(0x8000);
+ const __m128i vexp_offset = _mm_set1_epi16(0x7000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi16(0x3F00);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi16(0x0400);
+
+ const uint16_t* i = (const uint16_t*) input;
+ $if BATCH_TILE > 8:
+ for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ $for N in range(1, SIMD_TILE):
+ const __m128i vh${N} = _mm_loadu_si128((const __m128i*) (i + ${N * 8}));
+ i += ${BATCH_TILE};
+
+ $for N in range(SIMD_TILE):
+ const __m128i vsign${N} = _mm_and_si128(vh${N}, vsign_mask);
+
+ $for N in range(SIMD_TILE):
+ const __m128i vnonsign${N} = _mm_xor_si128(vh${N}, vsign${N});
+
+ $for N in range(SIMD_TILE):
+ const __m128i vprenorm${2*N} = _mm_slli_epi16(vnonsign${N}, 13);
+ const __m128i vprenorm${2*N+1} = _mm_add_epi16(_mm_srli_epi16(vnonsign${N}, 3), vexp_offset);
+
+ $for N in range(SIMD_TILE):
+ const __m128i vnorm${2*N} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm${2*N}, vprenorm${2*N+1})), vexp_scale));
+ const __m128i vnorm${2*N+1} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm${2*N}, vprenorm${2*N+1})), vexp_scale));
+
+ $for N in range(SIMD_TILE):
+ const __m128i vdenorm${2*N} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign${N}, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm${2*N+1} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign${N}, vmagic_mask)), vmagic_bias));
+
+ $for N in range(SIMD_TILE):
+ const __m128i vmask${N} = _mm_cmpgt_epi16(vnonsign${N}, vdenorm_cutoff);
+
+ $for N in range(SIMD_TILE):
+ $if SSE == 4:
+ const __m128i vf${2*N} = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign${N}),
+ _mm_blendv_epi8(vdenorm${2*N}, vnorm${2*N}, _mm_cvtepi16_epi32(vmask${N})));
+ const __m128i vf${2*N+1} = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign${N}),
+ _mm_blendv_epi8(vdenorm${2*N+1}, vnorm${2*N+1}, _mm_unpackhi_epi16(vmask${N}, vmask${N})));
+ $else:
+ const __m128i vxmask${2*N} = _mm_unpacklo_epi16(vmask${N}, vmask${N});
+ const __m128i vf${2*N} = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign${N}),
+ _mm_or_si128(_mm_and_si128(vxmask${2*N}, vnorm${2*N}), _mm_andnot_si128(vxmask${2*N}, vdenorm${2*N})));
+ const __m128i vxmask${2*N+1} = _mm_unpackhi_epi16(vmask${N}, vmask${N});
+ const __m128i vf${2*N+1} = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign${N}),
+ _mm_or_si128(_mm_and_si128(vxmask${2*N+1}, vnorm${2*N+1}), _mm_andnot_si128(vxmask${2*N+1}, vdenorm${2*N+1})));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ $for N in range(1, 2*SIMD_TILE):
+ _mm_storeu_ps(output + ${N * 4}, _mm_castsi128_ps(vf${N}));
+ output += ${BATCH_TILE};
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ $if SSE == 4:
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+ $else:
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ $if SSE == 4:
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ $else:
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vsign = _mm_and_si128(vh, vsign_mask);
+
+ const __m128i vnonsign = _mm_xor_si128(vh, vsign);
+
+ const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
+ const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);
+
+ $if SSE == 4:
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_lo, vnorm_lo, _mm_cvtepi16_epi32(vmask)));
+ $else:
+ const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
+ __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ $if SSE == 4:
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_blendv_epi8(vdenorm_hi, vnorm_hi, _mm_unpackhi_epi16(vmask, vmask)));
+ $else:
+ const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
+ vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
+ _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
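The int16 template above reaches the same float bits as the int32 template that follows, but it stays in 16-bit lanes until the prenorm interleave: (h << 13) supplies the three low mantissa bits and ((h >> 3) + 0x7000) supplies the offset exponent and upper mantissa, so unpacking the two halves is bit-identical to the int32 path's shift-and-add. A scalar sketch of the identity, with h15 standing for the 15-bit nonsign half value:

  const uint32_t lo = (uint16_t) (h15 << 13);             // _mm_slli_epi16: low 3 mantissa bits
  const uint32_t hi = (uint16_t) ((h15 >> 3) + 0x7000);   // _mm_srli_epi16 + vexp_offset
  const uint32_t bits = (hi << 16) | lo;                  // unpack == ((h15 << 16) >> 3) + 0x70000000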
diff --git a/src/f16-f32-vcvt/sse-int32.c.in b/src/f16-f32-vcvt/sse-int32.c.in
new file mode 100644
index 0000000..b986f72
--- /dev/null
+++ b/src/f16-f32-vcvt/sse-int32.c.in
@@ -0,0 +1,162 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert SSE in [2, 4]
+$assert not AVX or SSE == 4
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$SIMD_TILE = BATCH_TILE // 8
+$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <${SSE_HEADER}>
+
+#include <xnnpack/common.h>
+#include <xnnpack/vcvt.h>
+
+
+$ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
+void xnn_f16_f32_vcvt_ukernel__${ISA}_int32_x${BATCH_TILE}(
+ size_t n,
+ const void* input,
+ float* output,
+ const void* params)
+{
+ assert(n != 0);
+ assert(n % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(output != NULL);
+
+ const __m128i vsign_mask = _mm_set1_epi32(0x80000000);
+ const __m128i vexp_offset = _mm_set1_epi32(0x70000000);
+ const __m128 vexp_scale = _mm_set1_ps(0x1.0p-112f);
+ const __m128i vmagic_mask = _mm_set1_epi32(0x3F000000);
+ const __m128 vmagic_bias = _mm_set1_ps(0.5f);
+ const __m128i vdenorm_cutoff = _mm_set1_epi32(0x04000000);
+
+ const uint16_t* i = (const uint16_t*) input;
+ $if BATCH_TILE > 8:
+ for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
+ const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
+ $for N in range(1, SIMD_TILE):
+ const __m128i vh${N} = _mm_loadu_si128((const __m128i*) (i + ${N * 8}));
+ i += ${BATCH_TILE};
+
+ $for N in range(SIMD_TILE):
+ const __m128i vw${2*N} = _mm_unpacklo_epi16(_mm_setzero_si128(), vh${N});
+ const __m128i vw${2*N+1} = _mm_unpackhi_epi16(_mm_setzero_si128(), vh${N});
+
+ $for N in range(2*SIMD_TILE):
+ const __m128i vsign${N} = _mm_and_si128(vw${N}, vsign_mask);
+
+ $for N in range(2*SIMD_TILE):
+ const __m128i vnonsign${N} = _mm_xor_si128(vw${N}, vsign${N});
+
+ $for N in range(2*SIMD_TILE):
+ const __m128i vnorm${N} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign${N}, 3), vexp_offset)), vexp_scale));
+
+ $for N in range(2*SIMD_TILE):
+ const __m128i vdenorm${N} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign${N}, 16), vmagic_mask)), vmagic_bias));
+
+ $for N in range(2*SIMD_TILE):
+ const __m128i vmask${N} = _mm_cmpgt_epi32(vnonsign${N}, vdenorm_cutoff);
+
+ $for N in range(2*SIMD_TILE):
+ $if SSE == 4:
+ const __m128i vf${N} = _mm_or_si128(vsign${N}, _mm_blendv_epi8(vdenorm${N}, vnorm${N}, vmask${N}));
+ $else:
+ const __m128i vf${N} = _mm_or_si128(vsign${N},
+ _mm_or_si128(_mm_and_si128(vmask${N}, vnorm${N}), _mm_andnot_si128(vmask${N}, vdenorm${N})));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
+ $for N in range(1, 2*SIMD_TILE):
+ _mm_storeu_ps(output + ${N * 4}, _mm_castsi128_ps(vf${N}));
+ output += ${BATCH_TILE};
+ }
+ for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+ i += 8;
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ $if SSE == 4:
+ const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+ $else:
+ const __m128i vf_lo = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ $if SSE == 4:
+ const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ $else:
+ const __m128i vf_hi = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
+ _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
+ output += 8;
+ }
+ if XNN_UNPREDICTABLE(n != 0) {
+ const __m128i vh = _mm_loadu_si128((const __m128i*) i);
+
+ const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
+ const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);
+
+ const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
+ const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);
+
+ const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
+ const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);
+
+ const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
+ const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));
+
+ const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_mask)), vmagic_bias));
+ const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_mask)), vmagic_bias));
+
+ const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
+ $if SSE == 4:
+ __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
+ $else:
+ __m128i vf = _mm_or_si128(vsign_lo,
+ _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));
+
+ if (n & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, _mm_castsi128_ps(vf));
+ output += 4;
+
+ const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
+ $if SSE == 4:
+ vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
+ $else:
+ vf = _mm_or_si128(vsign_hi,
+ _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
+ }
+ if (n & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
+ output += 2;
+
+ vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
+ }
+ if (n & (1 * sizeof(float))) {
+ _mm_store_ss(output, _mm_castsi128_ps(vf));
+ }
+ }
+}
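Both templates pick between the norm and denorm results per lane; the only ISA split is in how. SSE4.1 uses a single _mm_blendv_epi8, while the SSE2 path spells out the same select as a bitwise mux, which is equivalent here because the compare produces all-ones or all-zeros lanes:

  /* SSE2 stand-in for _mm_blendv_epi8(vdenorm, vnorm, vmask), valid when vmask lanes are 0 or ~0 */
  const __m128i vsel = _mm_or_si128(_mm_and_si128(vmask, vnorm), _mm_andnot_si128(vmask, vdenorm));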
diff --git a/src/xnnpack/vcvt.h b/src/xnnpack/vcvt.h
index f0cf0af..dbe6df0 100644
--- a/src/xnnpack/vcvt.h
+++ b/src/xnnpack/vcvt.h
@@ -26,6 +26,36 @@
DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__neonfp16_x8)
DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__neonfp16_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int16_x8)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int16_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int16_x24)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int16_x32)
+
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int32_x8)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int32_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int32_x24)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse2_int32_x32)
+
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int16_x8)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int16_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int16_x24)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int16_x32)
+
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int32_x8)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int32_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int32_x24)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__sse41_int32_x32)
+
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int16_x8)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int16_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int16_x24)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int16_x32)
+
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int32_x8)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int32_x16)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int32_x24)
+DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__avx_int32_x32)
+
DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__f16c_x8)
DECLARE_F16_F32_VCVT_UKERNEL_FUNCTION(xnn_f16_f32_vcvt_ukernel__f16c_x16)
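All of these declarations share the f16->f32 vcvt ukernel signature used throughout this patch, so a call site looks like the sketch below (hypothetical buffers; n is in bytes of f32 output, as the asserts and loop bounds above suggest, and params is unused by these kernels):

  size_t batch = 123;               // number of fp16 values to convert
  const uint16_t* half_src = ...;   // IEEE fp16 bit patterns
  float* float_dst = ...;
  xnn_f16_f32_vcvt_ukernel__sse41_int32_x16(batch * sizeof(float), half_src, float_dst, NULL);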
diff --git a/test/f16-f32-vcvt.cc b/test/f16-f32-vcvt.cc
index a164af2..5b02f53 100644
--- a/test/f16-f32-vcvt.cc
+++ b/test/f16-f32-vcvt.cc
@@ -92,6 +92,894 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT16_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x8);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X8, batch_div_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
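Each kernel gets the same four cases: batch_eq_N exercises exactly one full tile, batch_div_N multiples of the tile, batch_lt_N the sub-tile tail paths, and batch_gt_N a full tile plus a remainder. The same quartet repeats below for every ISA/variant/tile combination.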
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT16_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x16);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X16, batch_div_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT16_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x24);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X24, batch_div_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT16_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x32);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X32, batch_div_32) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT16_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int16_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT32_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x8);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X8, batch_div_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT32_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x16);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X16, batch_div_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT32_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x24);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X24, batch_div_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE2_INT32_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_SSE2;
+ VCvtMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x32);
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X32, batch_div_32) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE2_INT32_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse2_int32_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT16_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x8);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X8, batch_div_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT16_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x16);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X16, batch_div_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT16_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x24);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X24, batch_div_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT16_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x32);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X32, batch_div_32) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT16_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int16_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT32_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x8);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X8, batch_div_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT32_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x16);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X16, batch_div_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT32_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x24);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X24, batch_div_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__SSE41_INT32_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_SSE41;
+ VCvtMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x32);
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X32, batch_div_32) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__SSE41_INT32_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__sse41_int32_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT16_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x8);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X8, batch_div_8) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT16_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x16);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X16, batch_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT16_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x24);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X24, batch_div_24) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT16_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x32);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X32, batch_div_32) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT16_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int16_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT32_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x8);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X8, batch_div_8) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x8);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT32_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x16);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X16, batch_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x16);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT32_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x24);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X24, batch_div_24) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x24);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_F32_VCVT__AVX_INT32_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_AVX;
+ VCvtMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x32);
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X32, batch_div_32) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x32);
+ }
+ }
+
+ TEST(F16_F32_VCVT__AVX_INT32_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VCvtMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_f16_f32_vcvt_ukernel__avx_int32_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
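+// Note: the four cases generated for every kernel above follow the usual
+// XNNPACK convention: batch_eq_N runs exactly one N-element tile,
+// batch_div_N covers the main loop over whole tiles, and batch_lt_N /
+// batch_gt_N cover a partial tile alone and after one full tile, so both
+// the vector loop and the remainder path are exercised. The int16/int32
+// suffixes appear to name the integer lane width each SIMD variant uses
+// for its bit manipulation, not the type being converted.
+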
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F16_F32_VCVT__F16C_X8, batch_eq_8) {
TEST_REQUIRES_X86_F16C;
VCvtMicrokernelTester()
diff --git a/test/f16-f32-vcvt.yaml b/test/f16-f32-vcvt.yaml
index b921751..7084cd1 100644
--- a/test/f16-f32-vcvt.yaml
+++ b/test/f16-f32-vcvt.yaml
@@ -5,6 +5,30 @@
- name: xnn_f16_f32_vcvt_ukernel__neonfp16_x8
- name: xnn_f16_f32_vcvt_ukernel__neonfp16_x16
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int16_x8
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int16_x16
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int16_x24
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int16_x32
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int32_x8
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int32_x16
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int32_x24
+- name: xnn_f16_f32_vcvt_ukernel__sse2_int32_x32
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int16_x8
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int16_x16
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int16_x24
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int16_x32
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int32_x8
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int32_x16
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int32_x24
+- name: xnn_f16_f32_vcvt_ukernel__sse41_int32_x32
+- name: xnn_f16_f32_vcvt_ukernel__avx_int16_x8
+- name: xnn_f16_f32_vcvt_ukernel__avx_int16_x16
+- name: xnn_f16_f32_vcvt_ukernel__avx_int16_x24
+- name: xnn_f16_f32_vcvt_ukernel__avx_int16_x32
+- name: xnn_f16_f32_vcvt_ukernel__avx_int32_x8
+- name: xnn_f16_f32_vcvt_ukernel__avx_int32_x16
+- name: xnn_f16_f32_vcvt_ukernel__avx_int32_x24
+- name: xnn_f16_f32_vcvt_ukernel__avx_int32_x32
- name: xnn_f16_f32_vcvt_ukernel__f16c_x8
- name: xnn_f16_f32_vcvt_ukernel__f16c_x16
- name: xnn_f16_f32_vcvt_ukernel__avx512skx_x16
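
For reference, the behavior every ukernel listed above is tested against is
plain IEEE binary16 -> binary32 conversion. The following scalar sketch is an
editorial illustration, not code from this patch; the f16_to_f32 helper is
hypothetical, covers only zeros and normal numbers, and the real kernels (and
the tester's reference) also handle subnormals, infinities, and NaNs.

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper, for illustration only: converts one IEEE binary16
       value (given as raw bits) to float. Zeros and normals only. */
    static float f16_to_f32(uint16_t h) {
      const uint32_t sign = (uint32_t) (h & 0x8000) << 16;  /* sign -> bit 31 */
      const uint32_t nonsign = (uint32_t) (h & 0x7FFF);     /* exponent + mantissa */
      uint32_t bits;
      if (nonsign == 0) {
        bits = sign;  /* +/-0.0f */
      } else {
        /* Shift the 5-bit exponent and 10-bit mantissa into binary32 position
           (left by 23 - 10 = 13) and re-bias the exponent: 127 - 15 = 112. */
        bits = sign | ((nonsign << 13) + ((uint32_t) 112 << 23));
      }
      float f;
      memcpy(&f, &bits, sizeof f);
      return f;
    }

The SSE2/SSE4.1/AVX variants presumably perform this same shift-and-rebias in
vector integer registers, which is where the int16/int32 lane-width split in
the kernel names comes from.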