Evaluation stubs and tests for FP32->FP16 conversion
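
Adds math evaluation stubs for the single-precision to half-precision
conversion (F16C on x86/x86-64, NEON-FP16 on ARM/ARM64), together with
exhaustive tests covering normal, subnormal, underflow, zero, overflow,
infinity, and NaN inputs on both architectures.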

PiperOrigin-RevId: 405526689
diff --git a/BUILD.bazel b/BUILD.bazel
index e34670e..24edf67 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -2733,6 +2733,7 @@
     "src/f32-f16-vcvt/gen/vcvt-neonfp16-x8.c",
     "src/f32-f16-vcvt/gen/vcvt-neonfp16-x16.c",
     "src/math/cvt-f16-f32-neonfp16.c",
+    "src/math/cvt-f32-f16-neonfp16.c",
 ]
 
 PROD_NEONFMA_MICROKERNEL_SRCS = [
@@ -4766,6 +4767,7 @@
     "src/f32-f16-vcvt/gen/vcvt-f16c-x8.c",
     "src/f32-f16-vcvt/gen/vcvt-f16c-x16.c",
     "src/math/cvt-f16-f32-f16c.c",
+    "src/math/cvt-f32-f16-f16c.c",
 ]
 
 PROD_XOP_MICROKERNEL_SRCS = [
@@ -8732,6 +8734,17 @@
 )
 
 xnnpack_unit_test(
+    name = "f32_f16_cvt_eval",
+    srcs = [
+        "eval/f32-f16-cvt.cc",
+        "src/xnnpack/AlignedAllocator.h",
+        "src/xnnpack/math-stubs.h",
+    ] + MICROKERNEL_TEST_HDRS,
+    automatic = False,
+    deps = MICROKERNEL_TEST_DEPS,
+)
+
+xnnpack_unit_test(
     name = "f32_exp_eval",
     srcs = [
         "eval/f32-exp.cc",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 987a85e..51bb58d 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1782,7 +1782,8 @@
   src/f16-f32-vcvt/gen/vcvt-neonfp16-x16.c
   src/f32-f16-vcvt/gen/vcvt-neonfp16-x8.c
   src/f32-f16-vcvt/gen/vcvt-neonfp16-x16.c
-  src/math/cvt-f16-f32-neonfp16.c)
+  src/math/cvt-f16-f32-neonfp16.c
+  src/math/cvt-f32-f16-neonfp16.c)
 
 SET(PROD_NEONFMA_MICROKERNEL_SRCS
   src/f32-dwconv/gen/up8x4-minmax-neonfma.c
@@ -3791,7 +3792,8 @@
   src/f16-f32-vcvt/gen/vcvt-f16c-x16.c
   src/f32-f16-vcvt/gen/vcvt-f16c-x8.c
   src/f32-f16-vcvt/gen/vcvt-f16c-x16.c
-  src/math/cvt-f16-f32-f16c.c)
+  src/math/cvt-f16-f32-f16c.c
+  src/math/cvt-f32-f16-f16c.c)
 
 SET(PROD_XOP_MICROKERNEL_SRCS
   src/qc8-dwconv/gen/up16x9-minmax-fp32-xop-mul16-add16.c
@@ -7136,6 +7138,14 @@
   TARGET_INCLUDE_DIRECTORIES(f16-f32-cvt-eval PRIVATE include src)
   TARGET_LINK_LIBRARIES(f16-f32-cvt-eval PRIVATE cpuinfo fp16 pthreadpool gtest gtest_main)
 
+  ADD_EXECUTABLE(f32-f16-cvt-eval eval/f32-f16-cvt.cc $<TARGET_OBJECTS:all_microkernels>)
+  SET_TARGET_PROPERTIES(f32-f16-cvt-eval PROPERTIES
+    CXX_STANDARD 11
+    CXX_STANDARD_REQUIRED YES
+    CXX_EXTENSIONS NO)
+  TARGET_INCLUDE_DIRECTORIES(f32-f16-cvt-eval PRIVATE include src)
+  TARGET_LINK_LIBRARIES(f32-f16-cvt-eval PRIVATE cpuinfo fp16 pthreadpool gtest gtest_main)
+
   ADD_EXECUTABLE(f32-exp-eval eval/f32-exp.cc $<TARGET_OBJECTS:all_microkernels>)
   SET_TARGET_PROPERTIES(f32-exp-eval PROPERTIES
     CXX_STANDARD 11
diff --git a/eval/f32-f16-cvt.cc b/eval/f32-f16-cvt.cc
new file mode 100644
index 0000000..2eebe83
--- /dev/null
+++ b/eval/f32-f16-cvt.cc
@@ -0,0 +1,546 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <iomanip>
+#include <ios>
+#include <limits>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <fp16.h>
+
+#include <xnnpack/AlignedAllocator.h>
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+#include <xnnpack/math-stubs.h>
+
+
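+// Number of FP32 elements converted per stub invocation; a multiple of both the F16C (8) and NEON (4) vector widths.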
+constexpr int kBlockSize = 1024;
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(CVT__F16C, positive_normal) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
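+    // [0x387FE000, 0x477FF000): FP32 values that round to normal FP16 numbers (0x0400 through 0x7BFF).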
+    for (uint32_t n = UINT32_C(0x387FE000); n < UINT32_C(0x477FF000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, negative_normal) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xB87FE000); n < UINT32_C(0xC77FF000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, positive_subnormal) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
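+    // [0x33000001, 0x387FE000): FP32 values that round to subnormal FP16; the min() clamp keeps the last block in range.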
+    for (uint32_t n = UINT32_C(0x33000001); n < UINT32_C(0x387FE000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0x387FDFFF)));
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, negative_subnormal) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xB3000001); n < UINT32_C(0xB87FE000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0xB87FDFFF)));
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, positive_underflow) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
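+    // (0x00000000, 0x33000001): positive FP32 values at or below 2**-25, all of which round to +0 in FP16.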
+    for (uint32_t n = UINT32_C(0x00000001); n < UINT32_C(0x33000001); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x0000);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, negative_underflow) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x80000001); n < UINT32_C(0xB3000001); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x8000);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, positive_zero) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x0000);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__F16C, negative_zero) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x8000);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__F16C, positive_overflow) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
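+    // [0x477FF000, 0x7F800000): finite FP32 values of 65520.0 and above, which round to +infinity in FP16.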
+    for (uint32_t n = UINT32_C(0x477FF000); n < UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x7C00);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, negative_overflow) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC77FF000); n < UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0xFC00);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, positive_infinity) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
+    xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x7C00);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__F16C, negative_infinity) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
+    xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0xFC00);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__F16C, positive_nan) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
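+    // NaN inputs must convert to FP16 NaNs: outputs strictly between 0x7C00 (+infinity) and 0x8000.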
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0x7FFFFFFF)));
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_GT(outputs[i], UINT16_C(0x7C00))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+        ASSERT_LT(outputs[i], UINT16_C(0x8000))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__F16C, negative_nan) {
+    TEST_REQUIRES_X86_F16C;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min<uint32_t>(n + i, UINT32_C(0x7FFFFFFF)));
+      }
+      xnn_math_f32_f16_cvt__f16c(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_GT(outputs[i], UINT16_C(0xFC00))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(CVT__NEONFP16, positive_normal) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x387FE000); n < UINT32_C(0x477FF000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, negative_normal) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xB87FE000); n < UINT32_C(0xC77FF000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, positive_subnormal) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x33000001); n < UINT32_C(0x387FE000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0x387FDFFF)));
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, negative_subnormal) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xB3000001); n < UINT32_C(0xB87FE000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0xB87FDFFF)));
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, positive_underflow) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x00000001); n < UINT32_C(0x33000001); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x0000);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, negative_underflow) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x80000001); n < UINT32_C(0xB3000001); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x8000);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, positive_zero) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +0.0f);
+    xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x0000);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__NEONFP16, negative_zero) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -0.0f);
+    xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x8000);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__NEONFP16, positive_overflow) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x477FF000); n < UINT32_C(0x7F800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0x7C00);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, negative_overflow) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0xC77FF000); n < UINT32_C(0xFF800000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(n + i);
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        const uint16_t reference_output = UINT16_C(0xFC00);
+        ASSERT_EQ(reference_output, outputs[i])
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, positive_infinity) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), +std::numeric_limits<float>::infinity());
+    xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0x7C00);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__NEONFP16, negative_infinity) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    std::fill(inputs.begin(), inputs.end(), -std::numeric_limits<float>::infinity());
+    xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+    const uint16_t reference_output = UINT16_C(0xFC00);
+    ASSERT_EQ(reference_output, outputs[0])
+      << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[0])
+      << ", reference = 0x" << std::hex << std::setw(4) << std::setfill('0') << reference_output
+      << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[0];
+  }
+
+  TEST(CVT__NEONFP16, positive_nan) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(std::min<uint32_t>(n + i, UINT32_C(0x7FFFFFFF)));
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_GT(outputs[i], UINT16_C(0x7C00))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+        ASSERT_LT(outputs[i], UINT16_C(0x8000))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+
+  TEST(CVT__NEONFP16, negative_nan) {
+    TEST_REQUIRES_ARM_NEON_FP16;
+
+    std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);
+    std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);
+    for (uint32_t n = UINT32_C(0x7F800001); n < UINT32_C(0x80000000); n += kBlockSize) {
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        inputs[i] = fp32_from_bits(UINT32_C(0x80000000) | std::min<uint32_t>(n + i, UINT32_C(0x7FFFFFFF)));
+      }
+      xnn_math_f32_f16_cvt__neonfp16(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());
+      for (uint32_t i = 0; i < kBlockSize; i++) {
+        ASSERT_GT(outputs[i], UINT16_C(0xFC00))
+          << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << fp32_to_bits(inputs[i])
+          << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];
+      }
+    }
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
diff --git a/src/math/cvt-f32-f16-f16c.c b/src/math/cvt-f32-f16-f16c.c
new file mode 100644
index 0000000..7693b2e
--- /dev/null
+++ b/src/math/cvt-f32-f16-f16c.c
@@ -0,0 +1,33 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/math-stubs.h>
+
+
+void xnn_math_f32_f16_cvt__f16c(
+    size_t n,
+    const float* input,
+    void* output)
+{
+  assert(n % (8 * sizeof(uint16_t)) == 0);
+
+  uint16_t* o = (uint16_t*) output;
+  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
+    const __m256 vx = _mm256_loadu_ps(input);
+    input += 8;
+
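+    // Narrow 8 FP32 values to FP16 with round-to-nearest-even (the low rounding-control bits of _MM_FROUND_NO_EXC are 0).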
+    const __m128i vy = _mm256_cvtps_ph(vx, _MM_FROUND_NO_EXC);
+
+    _mm_storeu_si128((__m128i*) o, vy);
+    o += 8;
+  }
+}
diff --git a/src/math/cvt-f32-f16-neonfp16.c b/src/math/cvt-f32-f16-neonfp16.c
new file mode 100644
index 0000000..75816fc
--- /dev/null
+++ b/src/math/cvt-f32-f16-neonfp16.c
@@ -0,0 +1,29 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/math-stubs.h>
+
+
+void xnn_math_f32_f16_cvt__neonfp16(
+    size_t n,
+    const float* input,
+    void* output)
+{
+  assert(n % (4 * sizeof(uint16_t)) == 0);
+
+  uint16_t* o = (uint16_t*) output;
+  for (; n != 0; n -= 4 * sizeof(uint16_t)) {
+    const float32x4_t vx = vld1q_f32(input); input += 4;
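+    // vcvt_f16_f32 narrows 4 FP32 values to FP16 using the FPCR rounding mode (round-to-nearest-even by default).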
+    const uint16x4_t vy = vreinterpret_u16_f16(vcvt_f16_f32(vx));
+    vst1_u16(o, vy); o += 4;
+  }
+}
diff --git a/src/xnnpack/math-stubs.h b/src/xnnpack/math-stubs.h
index ffb694c..7ea61ed 100644
--- a/src/xnnpack/math-stubs.h
+++ b/src/xnnpack/math-stubs.h
@@ -35,6 +35,12 @@
     const void* input,                             \
     float* output);
 
+#define DECLARE_F32_F16_CVT_MATH_FUNCTION(fn_name) \
+  void fn_name(                                    \
+    size_t n,                                      \
+    const float* input,                            \
+    void* output);
+
 #define DECLARE_F32_EXT_UNARY_MATH_FUNCTION(fn_name) \
   void fn_name(                                      \
     size_t n,                                        \
@@ -53,6 +59,9 @@
 DECLARE_F16_F32_CVT_MATH_FUNCTION(xnn_math_f16_f32_cvt__wasmsimd_int16)
 DECLARE_F16_F32_CVT_MATH_FUNCTION(xnn_math_f16_f32_cvt__wasmsimd_int32)
 
+DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__neonfp16)
+DECLARE_F32_F16_CVT_MATH_FUNCTION(xnn_math_f32_f16_cvt__f16c)
+
 DECLARE_F32_UNARY_MATH_FUNCTION(xnn_math_f32_roundne__neon_addsub)
 DECLARE_F32_UNARY_MATH_FUNCTION(xnn_math_f32_roundne__neonv8)
 DECLARE_F32_UNARY_MATH_FUNCTION(xnn_math_f32_roundne__sse_addsub)