Evaluation stubs for WAsm SIMD F32->F16 conversion

PiperOrigin-RevId: 408510470
diff --git a/BUILD.bazel b/BUILD.bazel
index ad0151d..0765dacb 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1790,6 +1790,7 @@
     "src/f32-vunary/gen/vsqr-wasmsimd-x8.c",
     "src/math/cvt-f16-f32-wasmsimd-int16.c",
     "src/math/cvt-f16-f32-wasmsimd-int32.c",
+    "src/math/cvt-f32-f16-wasmsimd.c",
     "src/math/expm1minus-wasmsimd-rr2-lut16-p3-andnot.c",
     "src/math/expm1minus-wasmsimd-rr2-lut16-p3-max.c",
     "src/math/expm1minus-wasmsimd-rr2-p6-andnot.c",
diff --git a/eval/f32-f16-cvt.cc b/eval/f32-f16-cvt.cc
index 66a5831..15132a0 100644
--- a/eval/f32-f16-cvt.cc
+++ b/eval/f32-f16-cvt.cc
@@ -1281,6 +1281,235 @@
   }
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
 
+#if XNN_ARCH_WASMSIMD
+  TEST(CVT__WASMSIMD, positive_normal) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x387FE000); n < UINT32_C(0x477FF000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, negative_normal) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xB87FE000); n < UINT32_C(0xC77FF000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, positive_subnormal) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x33000001); n < UINT32_C(0x387FE000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0x387FDFFF)));
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, negative_subnormal) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xB3000001); n < UINT32_C(0xB87FE000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0xB87FDFFF)));
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, positive_underflow) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x00000001); n < UINT32_C(0x33000001); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x0000);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, negative_underflow) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x80000001); n < UINT32_C(0xB3000001); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x8000);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, positive_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x0000);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__WASMSIMD, negative_zero) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x8000);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__WASMSIMD, positive_overflow) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x477FF000); n < UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x7C00);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, negative_overflow) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC77FF000); n < UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0xFC00);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, positive_infinity) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
+    xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x7C00);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__WASMSIMD, negative_infinity) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
+    xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0xFC00);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__WASMSIMD, positive_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0x7FFFFFFF)));
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_GT(outputs[i], UINT16_C(0x7C00))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+        ASSERT_LT(outputs[i], UINT16_C(0x8000))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__WASMSIMD, negative_nan) {
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min<uint32_t>(n + i, UINT32_C(0x7FFFFFFF)));
+      }
+      xnn_math_f32_f16_cvt__wasmsimd(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_GT(outputs[i], UINT16_C(0xFC00))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+#endif  // XNN_ARCH_WASMSIMD
+
 TEST(CVT__SCALAR, positive_normal) {
   std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
   std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
diff --git a/src/math/cvt-f32-f16-sse2.c b/src/math/cvt-f32-f16-sse2.c
index b6a14c6..bc5fa4c 100644
--- a/src/math/cvt-f32-f16-sse2.c
+++ b/src/math/cvt-f32-f16-sse2.c
@@ -37,23 +37,22 @@
 
     const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
     const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
-    const __m128i vsignw_lo = _mm_srai_epi32(_mm_castps_si128(vx_lo), 31);
-    const __m128i vsignw_hi = _mm_srai_epi32(_mm_castps_si128(vx_hi), 31);
 
+    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
+    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
     __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
     __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
     __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
     __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
     const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
     const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
-    __m128i vsignh = _mm_packs_epi32(vsignw_lo, vsignw_hi);
 
     vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
     vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
     vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
     vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
     const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
-    vsignh = _mm_slli_epi16(vsignh, 15);
+    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
 
     vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
     vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
diff --git a/src/math/cvt-f32-f16-sse41.c b/src/math/cvt-f32-f16-sse41.c
index b82f719..413aa71 100644
--- a/src/math/cvt-f32-f16-sse41.c
+++ b/src/math/cvt-f32-f16-sse41.c
@@ -37,23 +37,22 @@
 
     const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
     const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
-    const __m128i vsignw_lo = _mm_srai_epi32(_mm_castps_si128(vx_lo), 31);
-    const __m128i vsignw_hi = _mm_srai_epi32(_mm_castps_si128(vx_hi), 31);
 
+    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
+    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
     __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
     __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
     __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
     __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
     const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
     const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
-    __m128i vsignh = _mm_packs_epi32(vsignw_lo, vsignw_hi);
 
     vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
     vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
     vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
     vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
     const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
-    vsignh = _mm_slli_epi16(vsignh, 15);
+    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));
 
     vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
     vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
diff --git a/src/math/cvt-f32-f16-wasmsimd.c b/src/math/cvt-f32-f16-wasmsimd.c
new file mode 100644
index 0000000..33a79f8
--- /dev/null
+++ b/src/math/cvt-f32-f16-wasmsimd.c
@@ -0,0 +1,101 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/math-stubs.h>
+
+
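+// Evaluation stub: converts IEEE FP32 values to IEEE FP16 bit patterns with
+// integer and FP32 WAsm SIMD operations only, mirroring the bit-manipulation
+// approach of the scalar fp16_ieee_from_fp32_value reference used in eval tests.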
+void xnn_math_f32_f16_cvt__wasmsimd(
+    size_t n,
+    const float* input,
+    void* output)
+{
+  assert(n % (8 * sizeof(uint16_t)) == 0);
+
+  const v128_t vscale_to_inf = wasm_f32x4_const_splat(0x1.0p+112f);
+  const v128_t vscale_to_zero = wasm_f32x4_const_splat(0x1.0p-110f);
+  const v128_t vexp_bias = wasm_i32x4_const_splat(0x07800000);
+  const v128_t vexpw_max = wasm_i32x4_const_splat(0x7F800000);
+  const v128_t vbias_min = wasm_i32x4_const_splat(0x40008000);
+  const v128_t vexph_mask = wasm_i32x4_const_splat(0x00007C00);
+  const v128_t vmanth_mask = wasm_i32x4_const_splat(0x00000FFF);
+  const v128_t vnanh = wasm_i16x8_const_splat(0x7E00);
+
+  uint16_t* o = (uint16_t*) output;
+  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
+    const v128_t vx_lo = wasm_v128_load(input);
+    const v128_t vx_hi = wasm_v128_load(input + 4);
+    input += 8;
+
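+    // Split the inputs into absolute value and sign (vsignx = x XOR |x|);
+    // the sign is re-applied to the converted result at the end of the loop.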
+    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
+    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);
+
+    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
+    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
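+    // The rounding bias starts as bits(|x|) + (15 << 23); below it is masked
+    // to the exponent bits only and clamped from below for small results.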
+    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
+    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
+    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
+    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
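+    // NaN inputs are the only values whose |x| bits compare greater than
+    // 0x7F800000 as signed 32-bit integers.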
+    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
+    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);
+
+    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
+    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
+    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
+    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
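+    // Narrow the NaN mask and the sign words to 16 bits; the sign word is
+    // either 0x80000000 or 0, which saturates to 0x8000 or 0.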
+    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
+    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);
+
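+    // Clamp the bias for inputs in the subnormal/underflow range. A 16-bit max
+    // is safe: after the exponent mask the low half of each bias lane is zero.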
+    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
+    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);
+
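+    // Adding the bias performs the final rounding; the half-precision exponent
+    // and mantissa are then assembled from the bits of the FP32 sum.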
+    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
+    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);
+
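+    // Exponent field: sum bits shifted right by 13 and masked to 0x7C00;
+    // mantissa: the low 12 bits. Their sum lets a rounding carry propagate.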
+    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
+    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
+    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
+    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);
+
+    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
+    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);
+
+    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
+    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);
+
+    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);
+
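+    // Use the canonical NaN (0x7E00) for lanes where the input was NaN.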
+    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);
+
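+    // Re-apply the sign bit.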
+    const v128_t vh = wasm_v128_or(vabsh, vsignh);
+
+    wasm_v128_store(o, vh);
+    o += 8;
+  }
+}
diff --git a/src/xnnpack/math-stubs.h b/src/xnnpack/math-stubs.h
index 6c4a64b..db88384 100644
--- a/src/xnnpack/math-stubs.h
+++ b/src/xnnpack/math-stubs.h
@@ -64,6 +64,7 @@
 DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__sse2)
 DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__sse41)
 DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__f16c)
+DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__wasmsimd)
 DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__scalar)
 
 DECLARE_F32_UNARY_MATH_FUNCTION(xnn_math_f32_roundne__neon_addsub)