F16C implementation of F16 GAVGPOOL microkernels

PiperOrigin-RevId: 422727236
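
These kernels implement global average pooling over f16 (IEEE half-precision)
data on x86. F16C provides only f16<->f32 conversion (VCVTPH2PS/VCVTPS2PH) and
no f16 arithmetic, so every addition below widens both operands to f32 with
_mm256_cvtph_ps, adds in f32, and immediately rounds the accumulator back to
f16 with _mm256_cvtps_ph. The 7x unipass kernels handle inputs of up to 7 rows
in one sweep; the 7p7x multipass kernels accumulate 7 rows at a time into an
f16 scratch buffer for taller inputs. As a rough sketch of the contract (the
helper names and the element-based stride are illustrative, not part of
XNNPACK):

    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical scalar helpers standing in for the F16C conversions
    // (VCVTPH2PS / VCVTPS2PH); they are not part of this change.
    float fp16_to_fp32(uint16_t h);
    uint16_t fp32_to_fp16(float f);

    // Reference semantics of a minmax GAVGPOOL kernel:
    //   output[c] = clamp(scale * sum over rows of input[row][c], min, max)
    // with scale typically the reciprocal of the pooled row count. Unlike this
    // sketch, the vector kernels below round the running sum back to f16 after
    // every addition, so their results can differ by a few ULPs.
    void gavgpool_f16_reference(
        size_t rows, size_t channels,
        const uint16_t* input, size_t input_stride,  // stride in f16 elements
        uint16_t* output, float scale, float min, float max)
    {
      for (size_t c = 0; c < channels; c++) {
        float acc = 0.0f;
        for (size_t r = 0; r < rows; r++) {
          acc += fp16_to_fp32(input[r * input_stride + c]);
        }
        float out = acc * scale;
        out = out < min ? min : out;
        out = out > max ? max : out;
        output[c] = fp32_to_fp16(out);
      }
    }
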
diff --git a/BUILD.bazel b/BUILD.bazel
index f86d2f2..af49b4c 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -5985,6 +5985,14 @@
 ALL_F16C_MICROKERNEL_SRCS = [
     "src/f16-f32-vcvt/gen/vcvt-f16c-x8.c",
     "src/f16-f32-vcvt/gen/vcvt-f16c-x16.c",
+    "src/f16-gavgpool/gen/7p7x-minmax-f16c-c8.c",
+    "src/f16-gavgpool/gen/7p7x-minmax-f16c-c16.c",
+    "src/f16-gavgpool/gen/7p7x-minmax-f16c-c24.c",
+    "src/f16-gavgpool/gen/7p7x-minmax-f16c-c32.c",
+    "src/f16-gavgpool/gen/7x-minmax-f16c-c8.c",
+    "src/f16-gavgpool/gen/7x-minmax-f16c-c16.c",
+    "src/f16-gavgpool/gen/7x-minmax-f16c-c24.c",
+    "src/f16-gavgpool/gen/7x-minmax-f16c-c32.c",
     "src/f16-prelu/gen/f16c-2x8.c",
     "src/f16-prelu/gen/f16c-2x16.c",
     "src/f16-vbinary/gen/vadd-minmax-f16c-x8.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index bab93ff..12adf00 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4737,6 +4737,14 @@
 SET(ALL_F16C_MICROKERNEL_SRCS
   src/f16-f32-vcvt/gen/vcvt-f16c-x8.c
   src/f16-f32-vcvt/gen/vcvt-f16c-x16.c
+  src/f16-gavgpool/gen/7p7x-minmax-f16c-c8.c
+  src/f16-gavgpool/gen/7p7x-minmax-f16c-c16.c
+  src/f16-gavgpool/gen/7p7x-minmax-f16c-c24.c
+  src/f16-gavgpool/gen/7p7x-minmax-f16c-c32.c
+  src/f16-gavgpool/gen/7x-minmax-f16c-c8.c
+  src/f16-gavgpool/gen/7x-minmax-f16c-c16.c
+  src/f16-gavgpool/gen/7x-minmax-f16c-c24.c
+  src/f16-gavgpool/gen/7x-minmax-f16c-c32.c
   src/f16-prelu/gen/f16c-2x8.c
   src/f16-prelu/gen/f16c-2x16.c
   src/f16-vbinary/gen/vadd-minmax-f16c-x8.c
diff --git a/scripts/generate-f16-gavgpool.sh b/scripts/generate-f16-gavgpool.sh
index 94a2e2a..19987b1 100755
--- a/scripts/generate-f16-gavgpool.sh
+++ b/scripts/generate-f16-gavgpool.sh
@@ -15,6 +15,17 @@
 tools/xngen src/f16-gavgpool/multipass-neonfp16arith.c.in -D ROW_TILE=7 -D ROW_SUBTILE=7 -D CHANNEL_TILE=24 -o src/f16-gavgpool/gen/7p7x-minmax-neonfp16arith-c24.c &
 tools/xngen src/f16-gavgpool/multipass-neonfp16arith.c.in -D ROW_TILE=7 -D ROW_SUBTILE=7 -D CHANNEL_TILE=32 -o src/f16-gavgpool/gen/7p7x-minmax-neonfp16arith-c32.c &
 
+################################### x86 F16C ###################################
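+# ROW_TILE is the number of rows a pass accumulates (7), ROW_SUBTILE the rows per
+# middle pass of the multipass kernels, and CHANNEL_TILE the number of f16 channels
+# handled per loop iteration (a multiple of 8, the width of one VCVTPH2PS).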
+tools/xngen src/f16-gavgpool/unipass-f16c.c.in -D ROW_TILE=7 -D CHANNEL_TILE=8  -o src/f16-gavgpool/gen/7x-minmax-f16c-c8.c &
+tools/xngen src/f16-gavgpool/unipass-f16c.c.in -D ROW_TILE=7 -D CHANNEL_TILE=16 -o src/f16-gavgpool/gen/7x-minmax-f16c-c16.c &
+tools/xngen src/f16-gavgpool/unipass-f16c.c.in -D ROW_TILE=7 -D CHANNEL_TILE=24 -o src/f16-gavgpool/gen/7x-minmax-f16c-c24.c &
+tools/xngen src/f16-gavgpool/unipass-f16c.c.in -D ROW_TILE=7 -D CHANNEL_TILE=32 -o src/f16-gavgpool/gen/7x-minmax-f16c-c32.c &
+
+tools/xngen src/f16-gavgpool/multipass-f16c.c.in -D ROW_TILE=7 -D ROW_SUBTILE=7 -D CHANNEL_TILE=8  -o src/f16-gavgpool/gen/7p7x-minmax-f16c-c8.c &
+tools/xngen src/f16-gavgpool/multipass-f16c.c.in -D ROW_TILE=7 -D ROW_SUBTILE=7 -D CHANNEL_TILE=16 -o src/f16-gavgpool/gen/7p7x-minmax-f16c-c16.c &
+tools/xngen src/f16-gavgpool/multipass-f16c.c.in -D ROW_TILE=7 -D ROW_SUBTILE=7 -D CHANNEL_TILE=24 -o src/f16-gavgpool/gen/7p7x-minmax-f16c-c24.c &
+tools/xngen src/f16-gavgpool/multipass-f16c.c.in -D ROW_TILE=7 -D ROW_SUBTILE=7 -D CHANNEL_TILE=32 -o src/f16-gavgpool/gen/7p7x-minmax-f16c-c32.c &
+
 ################################## Unit tests #################################
 tools/generate-gavgpool-test.py --spec test/f16-gavgpool-minmax.yaml --output test/f16-gavgpool-minmax.cc &
 
diff --git a/src/f16-gavgpool/gen/7p7x-minmax-f16c-c16.c b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c16.c
new file mode 100644
index 0000000..ef557b8
--- /dev/null
+++ b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c16.c
@@ -0,0 +1,295 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/multipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* buffer,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows > 7);
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
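+  // Adding input_increment moves each row pointer 7 rows ahead of where the pass
+  // started, compensating for the round_up_po2(channels, 8) elements that the
+  // channel loops below advance it by.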
+
+  uint16_t* b = buffer;
+  size_t c = channels;
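+  // First pass: sum the first 7 rows into the f16 scratch buffer, 16 channels at
+  // a time; each add widens to f32 and rounds the result back to f16.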
+  for (; c >= 16; c -= 16) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+
+    _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+    _mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
+  }
+  if XNN_UNLIKELY(c != 0) {
+    do {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+
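+      // doz() is saturating (difference-or-zero) subtraction, so a final group of
+      // fewer than 8 channels still drives c to 0 and terminates the loop.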
+      c = doz(c, 8);
+    } while (c != 0);
+  }
+
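+  // Middle passes: while more than 7 rows remain, fold 7 further rows into the
+  // f16 sums kept in the scratch buffer.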
+  for (rows -= 7; rows > 7; rows -= 7) {
+    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+    i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+    i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+    i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+    i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+    i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+
+    uint16_t* b = buffer;
+    size_t c = channels;
+    for (; c >= 16; c -= 16) {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+      __m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) (b + 8));
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+      _mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      do {
+        __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+        const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+        const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+        _mm_store_si128((__m128i*) b, vacc01234567);
+        b += 8;
+
+        c = doz(c, 8);
+      } while (c != 0);
+    }
+  }
+
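+  // Final pass: between 1 and 7 rows remain. Pointers for rows past the end are
+  // redirected to the zero vector so they add nothing to the sums.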
+  i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+  i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
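+  // Add the remaining rows to the buffered sums, scale (typically by the
+  // reciprocal of the pooled row count), clamp to [min, max], and store f16 output.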
+  for (; channels >= 16; channels -= 16) {
+    __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+    __m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+    __m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+    vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 16;
+  }
+  if XNN_UNLIKELY(channels != 0) {
+    do {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      if XNN_LIKELY(channels >= 8) {
+        _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+        output = (uint16_t*) output + 8;
+        channels -= 8;
+      } else {
+        __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
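+        // Partial group of 1-7 channels: store 4, 2, then 1 f16 lanes, shifting
+        // consumed lanes out of the vector after each store.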
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh01234567);
+          output = (uint16_t*) output + 4;
+          vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+          output = (uint16_t*) output + 2;
+          vh01234567 = _mm_srli_epi64(vh01234567, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+        }
+        channels = 0;
+      }
+    } while (channels != 0);
+  }
+}
diff --git a/src/f16-gavgpool/gen/7p7x-minmax-f16c-c24.c b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c24.c
new file mode 100644
index 0000000..98a86bc
--- /dev/null
+++ b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c24.c
@@ -0,0 +1,344 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/multipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* buffer,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows > 7);
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
+
+  uint16_t* b = buffer;
+  size_t c = channels;
+  for (; c >= 24; c -= 24) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+
+    _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+    _mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
+    _mm_store_si128((__m128i*) b, vaccGHIJKLMN); b += 8;
+  }
+  if XNN_UNLIKELY(c != 0) {
+    do {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+
+      c = doz(c, 8);
+    } while (c != 0);
+  }
+
+  for (rows -= 7; rows > 7; rows -= 7) {
+    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+    i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+    i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+    i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+    i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+    i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+
+    uint16_t* b = buffer;
+    size_t c = channels;
+    for (; c >= 24; c -= 24) {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+      __m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) (b + 8));
+      __m128i vaccGHIJKLMN = _mm_loadu_si128((const __m128i*) (b + 16));
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi0xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+      _mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
+      _mm_store_si128((__m128i*) b, vaccGHIJKLMN); b += 8;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      do {
+        __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+        const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+        const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+        _mm_store_si128((__m128i*) b, vacc01234567);
+        b += 8;
+
+        c = doz(c, 8);
+      } while (c != 0);
+    }
+  }
+
+  i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+  i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
+  for (; channels >= 24; channels -= 24) {
+    __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+    __m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+    __m128i vaccGHIJKLMN = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi0xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+    __m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
+    __m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+    vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
+    voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 24;
+  }
+  if XNN_UNLIKELY(channels != 0) {
+    do {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      if XNN_LIKELY(channels >= 8) {
+        _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+        output = (uint16_t*) output + 8;
+        channels -= 8;
+      } else {
+        __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh01234567);
+          output = (uint16_t*) output + 4;
+          vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+          output = (uint16_t*) output + 2;
+          vh01234567 = _mm_srli_epi64(vh01234567, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+        }
+        channels = 0;
+      }
+    } while (channels != 0);
+  }
+}
diff --git a/src/f16-gavgpool/gen/7p7x-minmax-f16c-c32.c b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c32.c
new file mode 100644
index 0000000..8168ab8
--- /dev/null
+++ b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c32.c
@@ -0,0 +1,393 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/multipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* buffer,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows > 7);
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
+
+  uint16_t* b = buffer;
+  size_t c = channels;
+  for (; c >= 32; c -= 32) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    const __m256 vi1xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi2xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(vi0xOPQRSTUV, vi1xOPQRSTUV), _MM_FROUND_NO_EXC);
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi3xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi2xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi4xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi3xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi5xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi4xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi6xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi5xOPQRSTUV), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi6xOPQRSTUV), _MM_FROUND_NO_EXC);
+
+    _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+    _mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
+    _mm_store_si128((__m128i*) b, vaccGHIJKLMN); b += 8;
+    _mm_store_si128((__m128i*) b, vaccOPQRSTUV); b += 8;
+  }
+  if XNN_UNLIKELY(c != 0) {
+    do {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+
+      c = doz(c, 8);
+    } while (c != 0);
+  }
+
+  for (rows -= 7; rows > 7; rows -= 7) {
+    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+    i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+    i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+    i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+    i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+    i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+
+    uint16_t* b = buffer;
+    size_t c = channels;
+    for (; c >= 32; c -= 32) {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+      __m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) (b + 8));
+      __m128i vaccGHIJKLMN = _mm_loadu_si128((const __m128i*) (b + 16));
+      __m128i vaccOPQRSTUV = _mm_loadu_si128((const __m128i*) (b + 24));
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi0xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi0xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi1xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi0xOPQRSTUV), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi2xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi1xOPQRSTUV), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi3xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi2xOPQRSTUV), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi4xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi3xOPQRSTUV), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi5xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi4xOPQRSTUV), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+      const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+      const __m256 vi6xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi5xOPQRSTUV), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+      vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+      vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+      vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi6xOPQRSTUV), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+      _mm_store_si128((__m128i*) b, vacc89ABCDEF); b += 8;
+      _mm_store_si128((__m128i*) b, vaccGHIJKLMN); b += 8;
+      _mm_store_si128((__m128i*) b, vaccOPQRSTUV); b += 8;
+    }
+    if XNN_UNLIKELY(c != 0) {
+      do {
+        __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+        const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+        const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+        const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+        vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+        _mm_store_si128((__m128i*) b, vacc01234567);
+        b += 8;
+
+        c = doz(c, 8);
+      } while (c != 0);
+    }
+  }
+
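+  // Final pass: between 1 and 7 rows remain. Row pointers past the end of the
+  // input are redirected to the zero vector so the excess rows add nothing.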
+  i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+  i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
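+  // The scale (reciprocal of the pooling element count) and the min/max clamp
+  // bounds come pre-broadcast as float[8] arrays in params; _mm256_load_ps
+  // assumes they are 32-byte aligned, which the params initializer presumably
+  // guarantees.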
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
+  for (; channels >= 32; channels -= 32) {
+    __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+    __m128i vacc89ABCDEF = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+    __m128i vaccGHIJKLMN = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+    __m128i vaccOPQRSTUV = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi0xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi0x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi0xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi1xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi0xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi2xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi1xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi3xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi2xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi4xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi3xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi5xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi4xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi6xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi5xOPQRSTUV), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi6xOPQRSTUV), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_NO_EXC);
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+    __m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
+    __m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
+    __m256 voutOPQRSTUV = _mm256_max_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+    vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
+    voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
+    voutOPQRSTUV = _mm256_min_ps(voutOPQRSTUV, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 24), _mm256_cvtps_ph(voutOPQRSTUV, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 32;
+  }
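+  // 1-31 trailing channels: full 8-lane groups are stored directly; the last
+  // partial group is written out in 4-, 2- and 1-element pieces so nothing
+  // beyond `channels` outputs is touched.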
+  if XNN_UNLIKELY(channels != 0) {
+    do {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      if XNN_LIKELY(channels >= 8) {
+        _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+        output = (uint16_t*) output + 8;
+        channels -= 8;
+      } else {
+        __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh01234567);
+          output = (uint16_t*) output + 4;
+          vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+          output = (uint16_t*) output + 2;
+          vh01234567 = _mm_srli_epi64(vh01234567, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+        }
+        channels = 0;
+      }
+    } while (channels != 0);
+  }
+}
diff --git a/src/f16-gavgpool/gen/7p7x-minmax-f16c-c8.c b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c8.c
new file mode 100644
index 0000000..6dda651
--- /dev/null
+++ b/src/f16-gavgpool/gen/7p7x-minmax-f16c-c8.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/multipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* buffer,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows > 7);
+  assert(channels != 0);
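+
+  // Reference semantics, as a scalar sketch (f16_round denotes a round-trip
+  // through half precision, matching the cvtps_ph/cvtph_ps pairs below):
+  //   sum[c]    = f16_round(... f16_round(in[0][c] + in[1][c]) ... + in[rows-1][c])
+  //   output[c] = clamp(f16_round(sum[c] * scale), min, max)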
+
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
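+  // 7 * input_stride steps over the seven rows just consumed; subtracting
+  // round_up_po2(channels, 8) * sizeof(uint16_t) undoes the in-row advance the
+  // channel loop applied (8 elements per group, including the partial group).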
+
+  uint16_t* b = buffer;
+  size_t c = channels;
+  for (; c != 0; c = doz(c, 8)) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+    _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+  }
+
+  for (rows -= 7; rows > 7; rows -= 7) {
+    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+    i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+    i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+    i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+    i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+    i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+
+    uint16_t* b = buffer;
+    size_t c = channels;
+    for (; c != 0; c = doz(c, 8)) {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) b);
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      _mm_store_si128((__m128i*) b, vacc01234567); b += 8;
+    }
+  }
+
+  i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
+  i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  i2 = (const uint16_t*) ((uintptr_t) i2 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  i3 = (const uint16_t*) ((uintptr_t) i3 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  i4 = (const uint16_t*) ((uintptr_t) i4 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  i5 = (const uint16_t*) ((uintptr_t) i5 + input_increment);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  i6 = (const uint16_t*) ((uintptr_t) i6 + input_increment);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
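+  // Final pass: scale each f16 sum by the pre-broadcast reciprocal of the
+  // pooling element count, then clamp to [min, max].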
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
+  for (; channels >= 8; channels -= 8) {
+    __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 8;
+  }
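+  // At most 7 channels remain for the C8 variant, so a single masked group
+  // suffices: a plain block instead of the do-while the wider variants use.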
+  if XNN_UNLIKELY(channels != 0) {
+    {
+      __m128i vacc01234567 = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1)); i1 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi0x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi1x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3)); i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4)); i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5)); i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6)); i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
+      if (channels & 4) {
+        _mm_storel_epi64((__m128i*) output, vh01234567);
+        output = (uint16_t*) output + 4;
+        vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+      }
+      if (channels & 2) {
+        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+        output = (uint16_t*) output + 2;
+        vh01234567 = _mm_srli_epi64(vh01234567, 32);
+      }
+      if (channels & 1) {
+        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+      }
+    }
+  }
+}
diff --git a/src/f16-gavgpool/gen/7x-minmax-f16c-c16.c b/src/f16-gavgpool/gen/7x-minmax-f16c-c16.c
new file mode 100644
index 0000000..26aef1e
--- /dev/null
+++ b/src/f16-gavgpool/gen/7x-minmax-f16c-c16.c
@@ -0,0 +1,161 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/unipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows != 0);
+  assert(rows <= 7);
+  assert(channels != 0);
+
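+  // Single pass: at most 7 rows. Pointers for rows beyond `rows` alias the
+  // zero vector, so the unconditional 7-row sum below stays exact.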
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
+  for (; channels >= 16; channels -= 16) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
+    i0 += 16;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
+    i1 += 16;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
+    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    i2 += 16;
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 8)));
+    i3 += 16;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 8)));
+    i4 += 16;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 8)));
+    i5 += 16;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 8)));
+    i6 += 16;
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+    __m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+    vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 16;
+  }
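+  // 1-15 trailing channels: whole 8-lane groups are stored directly; the last
+  // partial group is flushed in 4-, 2- and 1-element steps.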
+  if XNN_UNLIKELY(channels != 0) {
+    do {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+      i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+      i1 += 8;
+
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+      i2 += 8;
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+      i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+      i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+      i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+      i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      if XNN_LIKELY(channels >= 8) {
+        _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+        output = (uint16_t*) output + 8;
+        channels -= 8;
+      } else {
+        __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh01234567);
+          output = (uint16_t*) output + 4;
+          vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+          output = (uint16_t*) output + 2;
+          vh01234567 = _mm_srli_epi64(vh01234567, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+        }
+        channels = 0;
+      }
+    } while (channels != 0);
+  }
+}
diff --git a/src/f16-gavgpool/gen/7x-minmax-f16c-c24.c b/src/f16-gavgpool/gen/7x-minmax-f16c-c24.c
new file mode 100644
index 0000000..42d64e2
--- /dev/null
+++ b/src/f16-gavgpool/gen/7x-minmax-f16c-c24.c
@@ -0,0 +1,178 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/unipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows != 0);
+  assert(rows <= 7);
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
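+  // Main loop: 24 channels per iteration, kept as three 8-lane f16 groups.
+  // Loads and adds are interleaved, presumably to hide the latency of the
+  // cvtph/cvtps conversions.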
+  for (; channels >= 24; channels -= 24) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
+    const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 16)));
+    i0 += 24;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
+    const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 16)));
+    i1 += 24;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
+    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 16)));
+    __m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+    i2 += 24;
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 16)));
+    i3 += 24;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 16)));
+    i4 += 24;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 16)));
+    i5 += 24;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 16)));
+    i6 += 24;
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+    __m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
+    __m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+    vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
+    voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 24;
+  }
+  if XNN_UNLIKELY(channels != 0) {
+    do {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+      i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+      i1 += 8;
+
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+      i2 += 8;
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+      i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+      i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+      i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+      i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      if XNN_LIKELY(channels >= 8) {
+        _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+        output = (uint16_t*) output + 8;
+        channels -= 8;
+      } else {
+        __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh01234567);
+          output = (uint16_t*) output + 4;
+          vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+          output = (uint16_t*) output + 2;
+          vh01234567 = _mm_srli_epi64(vh01234567, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+        }
+        channels = 0;
+      }
+    } while (channels != 0);
+  }
+}
diff --git a/src/f16-gavgpool/gen/7x-minmax-f16c-c32.c b/src/f16-gavgpool/gen/7x-minmax-f16c-c32.c
new file mode 100644
index 0000000..1431cbb
--- /dev/null
+++ b/src/f16-gavgpool/gen/7x-minmax-f16c-c32.c
@@ -0,0 +1,195 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/unipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows != 0);
+  assert(rows <= 7);
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
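+  // Main loop: 32 channels per iteration as four 8-lane groups; accumulators
+  // are kept in f16 (__m128i) and round-tripped through f32 for each add.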
+  for (; channels >= 32; channels -= 32) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+    const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
+    const __m256 vi0xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 16)));
+    const __m256 vi0xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 24)));
+    i0 += 32;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+    const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
+    const __m256 vi1xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 16)));
+    const __m256 vi1xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 24)));
+    i1 += 32;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
+    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vi0x89ABCDEF, vi1x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi2xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 16)));
+    __m128i vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(vi0xGHIJKLMN, vi1xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi2xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 24)));
+    __m128i vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(vi0xOPQRSTUV, vi1xOPQRSTUV), _MM_FROUND_NO_EXC);
+    i2 += 32;
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi3x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi2x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi3xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 16)));
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi2xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi3xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i3 + 24)));
+    i3 += 32;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi2xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi3x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi4xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 16)));
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi3xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi4xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i4 + 24)));
+    i4 += 32;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi3xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi4x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi5xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 16)));
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi4xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi5xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i5 + 24)));
+    i5 += 32;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi4xOPQRSTUV), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 8)));
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi5x89ABCDEF), _MM_FROUND_NO_EXC);
+    const __m256 vi6xGHIJKLMN = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 16)));
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi5xGHIJKLMN), _MM_FROUND_NO_EXC);
+    const __m256 vi6xOPQRSTUV = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i6 + 24)));
+    i6 += 32;
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi5xOPQRSTUV), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc89ABCDEF), vi6x89ABCDEF), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vi6xGHIJKLMN), _MM_FROUND_NO_EXC);
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vi6xOPQRSTUV), _MM_FROUND_NO_EXC);
+
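+    // Scale the seven-row sums by the precomputed factor from params (for
+    // average pooling this is the reciprocal of the pooled-element count).
+    // As with the additions above, the math is done in f32 and the result is
+    // rounded back to f16.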
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vscale), _MM_FROUND_NO_EXC);
+    vaccGHIJKLMN = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vscale), _MM_FROUND_NO_EXC);
+    vaccOPQRSTUV = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+    __m256 vout89ABCDEF = _mm256_max_ps(_mm256_cvtph_ps(vacc89ABCDEF), vmin);
+    __m256 voutGHIJKLMN = _mm256_max_ps(_mm256_cvtph_ps(vaccGHIJKLMN), vmin);
+    __m256 voutOPQRSTUV = _mm256_max_ps(_mm256_cvtph_ps(vaccOPQRSTUV), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+    vout89ABCDEF = _mm256_min_ps(vout89ABCDEF, vmax);
+    voutGHIJKLMN = _mm256_min_ps(voutGHIJKLMN, vmax);
+    voutOPQRSTUV = _mm256_min_ps(voutOPQRSTUV, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 8), _mm256_cvtps_ph(vout89ABCDEF, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 16), _mm256_cvtps_ph(voutGHIJKLMN, _MM_FROUND_NO_EXC));
+    _mm_storeu_si128((__m128i*) ((uint16_t*) output + 24), _mm256_cvtps_ph(voutOPQRSTUV, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 32;
+  }
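+  // Remainder: handle the last channels in groups of 8. Full 16-byte vectors
+  // are still loaded for partial groups; the XNN_OOB_READS annotation on this
+  // kernel permits reading past the end of each row.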
+  if XNN_UNLIKELY(channels != 0) {
+    do {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+      i0 += 8;
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+      i1 += 8;
+
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+      i2 += 8;
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+      i3 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+      i4 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+      i5 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+      i6 += 8;
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
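+      // Groups of 8 or more channels are stored whole; the final sub-8 group
+      // takes the partial-store path below and terminates the loop.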
+      if XNN_LIKELY(channels >= 8) {
+        _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+        output = (uint16_t*) output + 8;
+        channels -= 8;
+      } else {
+        __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh01234567);
+          output = (uint16_t*) output + 4;
+          vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+          output = (uint16_t*) output + 2;
+          vh01234567 = _mm_srli_epi64(vh01234567, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+        }
+        channels = 0;
+      }
+    } while (channels != 0);
+  }
+}
diff --git a/src/f16-gavgpool/gen/7x-minmax-f16c-c8.c b/src/f16-gavgpool/gen/7x-minmax-f16c-c8.c
new file mode 100644
index 0000000..aef97de
--- /dev/null
+++ b/src/f16-gavgpool/gen/7x-minmax-f16c-c8.c
@@ -0,0 +1,130 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gavgpool/unipass-f16c.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows != 0);
+  assert(rows <= 7);
+  assert(channels != 0);
+
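+  // Set up the 7 row pointers; pointers past the actual `rows` count are
+  // redirected to the `zero` buffer, so the loops below can read all 7 rows
+  // unconditionally.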
+  const uint16_t* i0 = input;
+  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 2) {
+    i1 = (const uint16_t*) zero;
+  }
+  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 2) {
+    i2 = (const uint16_t*) zero;
+  }
+  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 4) {
+    i3 = (const uint16_t*) zero;
+  }
+  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 4) {
+    i4 = (const uint16_t*) zero;
+  }
+  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
+  if XNN_UNPREDICTABLE(rows < 6) {
+    i5 = (const uint16_t*) zero;
+  }
+  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
+  if XNN_UNPREDICTABLE(rows <= 6) {
+    i6 = (const uint16_t*) zero;
+  }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
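+  // Main loop: 8 channels per iteration. The load of each row is interleaved
+  // with the accumulator update for the previous row, likely to hide the
+  // cvtph_ps/cvtps_ph conversion latency.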
+  for (; channels >= 8; channels -= 8) {
+    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+    i0 += 8;
+    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+    i1 += 8;
+
+    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+    i2 += 8;
+
+    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+    i3 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+    i4 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+    i5 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+    i6 += 8;
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+
+    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+
+    vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + 8;
+  }
+  if XNN_UNLIKELY(channels != 0) {
+    {
+      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
+      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
+
+      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
+
+      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
+      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
+      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);
+
+      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
+      vout01234567 = _mm256_min_ps(vout01234567, vmax);
+
+      __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
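+      // Store the remaining 1-7 values 4, 2, then 1 at a time, shifting the
+      // consumed lanes out of the low half of the vector after each store.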
+      if (channels & 4) {
+        _mm_storel_epi64((__m128i*) output, vh01234567);
+        output = (uint16_t*) output + 4;
+        vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
+      }
+      if (channels & 2) {
+        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh01234567);
+        output = (uint16_t*) output + 2;
+        vh01234567 = _mm_srli_epi64(vh01234567, 32);
+      }
+      if (channels & 1) {
+        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh01234567, 0);
+      }
+    }
+  }
+}
diff --git a/src/f16-gavgpool/multipass-f16c.c.in b/src/f16-gavgpool/multipass-f16c.c.in
new file mode 100644
index 0000000..f0c6b57
--- /dev/null
+++ b/src/f16-gavgpool/multipass-f16c.c.in
@@ -0,0 +1,213 @@
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert CHANNEL_TILE % 8 == 0
+$assert CHANNEL_TILE >= 8
+$assert ROW_TILE >= 3
+$assert ROW_SUBTILE >= 3
+$assert ROW_SUBTILE <= ROW_TILE
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+#include <xnnpack/math.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_${ROW_TILE}p${ROW_SUBTILE}x__f16c_c${CHANNEL_TILE}(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* buffer,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows > ${ROW_TILE});
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  $for M in range(1, ROW_TILE):
+    const uint16_t* i${M} = (const uint16_t*) ((uintptr_t) i${M-1} + input_stride);
+  const size_t input_increment = ${ROW_TILE} * input_stride - round_up_po2(channels, 8) * sizeof(uint16_t);
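+  // input_increment re-bases the row pointers between passes: advance by
+  // ROW_TILE rows, minus the distance already covered while streaming across
+  // the channels (rounded up to the 8-element vector width).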
+
+  uint16_t* b = buffer;
+  size_t c = channels;
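+  // First pass: sum the first ROW_TILE rows of each channel group into the
+  // f16 scratch buffer.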
+  for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 8 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 8 else "c = doz(c, %d)") % CHANNEL_TILE}) {
+    $for M in range(2):
+      $for C in range(0, CHANNEL_TILE, 8):
+        const __m256 vi${M}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M})); i${M} += 8;
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      const __m256 vi2x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2)); i2 += 8;
+      __m128i vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(vi0x${ABC[C:C+8]}, vi1x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);
+
+    $for M in range(2, ROW_TILE):
+      $for C in range(0, CHANNEL_TILE, 8):
+        $if M + 1 != ROW_TILE:
+          const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
+        vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      _mm_store_si128((__m128i*) b, vacc${ABC[C:C+8]}); b += 8;
+  }
+  $if CHANNEL_TILE > 8:
+    if XNN_UNLIKELY(c != 0) {
+      do {
+        $for M in range(3):
+          const __m256 vi${M}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M})); i${M} += 8;
+        __m128i vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(vi0x${ABC[0:8]}, vi1x${ABC[0:8]}), _MM_FROUND_NO_EXC);
+
+        $for M in range(2, ROW_TILE):
+          $if M + 1 != ROW_TILE:
+            const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
+          vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);
+
+        _mm_store_si128((__m128i*) b, vacc${ABC[0:8]}); b += 8;
+
+        c = doz(c, 8);
+      } while (c != 0);
+    }
+
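+  // Intermediate passes: accumulate ROW_SUBTILE further rows onto the buffered
+  // partial sums until no more than ROW_SUBTILE rows are left for the final
+  // pass.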
+  for (rows -= ${ROW_TILE}; rows > ${ROW_SUBTILE}; rows -= ${ROW_SUBTILE}) {
+    $for M in range(ROW_SUBTILE):
+      i${M} = (const uint16_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);
+
+    uint16_t* b = buffer;
+    size_t c = channels;
+    for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 8 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 8 else "c = doz(c, %d)") % CHANNEL_TILE}) {
+      __m128i vacc${ABC[0:8]} = _mm_loadu_si128((const __m128i*) b);
+      $for C in range(8, CHANNEL_TILE, 8):
+        __m128i vacc${ABC[C:C+8]} = _mm_loadu_si128((const __m128i*) (b + ${C}));
+
+      $for C in range(0, CHANNEL_TILE, 8):
+        const __m256 vi0x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+      $for M in range(ROW_TILE):
+        $for C in range(0, CHANNEL_TILE, 8):
+          $if M + 1 != ROW_TILE:
+            const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
+          vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);
+
+      $for C in range(0, CHANNEL_TILE, 8):
+        _mm_store_si128((__m128i*) b, vacc${ABC[C:C+8]}); b += 8;
+    }
+    $if CHANNEL_TILE > 8:
+      if XNN_UNLIKELY(c != 0) {
+        do {
+          __m128i vacc${ABC[0:8]} = _mm_loadu_si128((const __m128i*) b);
+          const __m256 vi0x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+          $for M in range(ROW_TILE):
+            $if M + 1 != ROW_TILE:
+              const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
+            vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);
+
+          _mm_store_si128((__m128i*) b, vacc${ABC[0:8]}); b += 8;
+
+          c = doz(c, 8);
+        } while (c != 0);
+      }
+  }
+
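+  // Final pass: re-base the row pointers, redirecting rows past the end to the
+  // `zero` buffer, add the last rows to the buffered sums, then scale and
+  // clamp.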
+  i0 = (const uint16_t*) ((uintptr_t) i${ROW_TILE - ROW_SUBTILE} + input_increment);
+  $for M in range(1, ROW_SUBTILE):
+    i${M} = (const uint16_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);
+    $if M % 2 == 1:
+      if XNN_UNPREDICTABLE(rows < ${M+1}) {
+        i${M} = (const uint16_t*) zero;
+      }
+    $else:
+      if XNN_UNPREDICTABLE(rows <= ${M}) {
+        i${M} = (const uint16_t*) zero;
+      }
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
+  for (; channels >= ${CHANNEL_TILE}; channels -= ${CHANNEL_TILE}) {
+    $for C in range(0, CHANNEL_TILE, 8):
+      __m128i vacc${ABC[C:C+8]} = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      const __m256 vi0x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+
+    $for M in range(ROW_TILE):
+      $for C in range(0, CHANNEL_TILE, 8):
+        $if M + 1 != ROW_TILE:
+          const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
+        vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vscale), _MM_FROUND_NO_EXC);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      __m256 vout${ABC[C:C+8]} = _mm256_max_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vmin);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      vout${ABC[C:C+8]} = _mm256_min_ps(vout${ABC[C:C+8]}, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC));
+    $for C in range(8, CHANNEL_TILE, 8):
+      _mm_storeu_si128((__m128i*) ((uint16_t*) output + ${C}), _mm256_cvtps_ph(vout${ABC[C:C+8]}, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + ${CHANNEL_TILE};
+  }
+  if XNN_UNLIKELY(channels != 0) {
+    ${"do " if CHANNEL_TILE > 8 else ""}{
+      __m128i vacc${ABC[0:8]} = _mm_loadu_si128((const __m128i*) buffer); buffer = (uint16_t*) buffer + 8;
+
+      const __m256 vi0x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0)); i0 += 8;
+      $for M in range(ROW_TILE):
+        $if M + 1 != ROW_TILE:
+          const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1})); i${M+1} += 8;
+        vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);
+
+      vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout${ABC[0:8]} = _mm256_max_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vmin);
+      vout${ABC[0:8]} = _mm256_min_ps(vout${ABC[0:8]}, vmax);
+
+      $if CHANNEL_TILE > 8:
+        if XNN_LIKELY(channels >= 8) {
+          _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC));
+          output = (uint16_t*) output + 8;
+          channels -= 8;
+        } else {
+          __m128i vh${ABC[0:8]} = _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC);
+          if (channels & 4) {
+            _mm_storel_epi64((__m128i*) output, vh${ABC[0:8]});
+            output = (uint16_t*) output + 4;
+            vh${ABC[0:8]} = _mm_unpackhi_epi64(vh${ABC[0:8]}, vh${ABC[0:8]});
+          }
+          if (channels & 2) {
+            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh${ABC[0:8]});
+            output = (uint16_t*) output + 2;
+            vh${ABC[0:8]} = _mm_srli_epi64(vh${ABC[0:8]}, 32);
+          }
+          if (channels & 1) {
+            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh${ABC[0:8]}, 0);
+          }
+          channels = 0;
+        }
+      $else:
+        __m128i vh${ABC[0:8]} = _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh${ABC[0:8]});
+          output = (uint16_t*) output + 4;
+          vh${ABC[0:8]} = _mm_unpackhi_epi64(vh${ABC[0:8]}, vh${ABC[0:8]});
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh${ABC[0:8]});
+          output = (uint16_t*) output + 2;
+          vh${ABC[0:8]} = _mm_srli_epi64(vh${ABC[0:8]}, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh${ABC[0:8]}, 0);
+        }
+    }${" while (channels != 0);" if CHANNEL_TILE > 8 else ""}
+  }
+}
diff --git a/src/f16-gavgpool/unipass-f16c.c.in b/src/f16-gavgpool/unipass-f16c.c.in
new file mode 100644
index 0000000..65e5c4e
--- /dev/null
+++ b/src/f16-gavgpool/unipass-f16c.c.in
@@ -0,0 +1,147 @@
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert CHANNEL_TILE % 8 == 0
+$assert CHANNEL_TILE >= 8
+$assert ROW_TILE >= 3
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gavgpool.h>
+
+
+void xnn_f16_gavgpool_minmax_ukernel_${ROW_TILE}x__f16c_c${CHANNEL_TILE}(
+    size_t rows,
+    size_t channels,
+    const void* input,
+    size_t input_stride,
+    const void* zero,
+    void* output,
+    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+  assert(rows != 0);
+  assert(rows <= ${ROW_TILE});
+  assert(channels != 0);
+
+  const uint16_t* i0 = input;
+  $for M in range(1, ROW_TILE):
+    const uint16_t* i${M} = (const uint16_t*) ((uintptr_t) i${M-1} + input_stride);
+    $if M % 2 == 1:
+      if XNN_UNPREDICTABLE(rows < ${M+1}) {
+        i${M} = (const uint16_t*) zero;
+      }
+    $else:
+      if XNN_UNPREDICTABLE(rows <= ${M}) {
+        i${M} = (const uint16_t*) zero;
+      }
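+  // Note that `rows < M+1` (odd M) and `rows <= M` (even M) are the same
+  // condition; the alternating spelling matches the generated kernels above.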
+
+  const __m256 vscale = _mm256_load_ps(params->avx.scale);
+  const __m256 vmin = _mm256_load_ps(params->avx.min);
+  const __m256 vmax = _mm256_load_ps(params->avx.max);
+  for (; channels >= ${CHANNEL_TILE}; channels -= ${CHANNEL_TILE}) {
+    $for M in range(2):
+      const __m256 vi${M}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
+      $for C in range(8, CHANNEL_TILE, 8):
+        const __m256 vi${M}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i${M} + ${C})));
+      i${M} += ${CHANNEL_TILE};
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      $if C == 0:
+        const __m256 vi2x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+      $else:
+        const __m256 vi2x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + ${C})));
+      __m128i vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(vi0x${ABC[C:C+8]}, vi1x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);
+    i2 += ${CHANNEL_TILE};
+
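+    // Interleave each row's loads with the accumulator updates for the
+    // previous row; the running sum stays in f16 between rows.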
+    $for M in range(2, ROW_TILE):
+      $for C in range(0, CHANNEL_TILE, 8):
+        $if M + 1 != ROW_TILE:
+          $if C == 0:
+            const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1}));
+          $else:
+            const __m256 vi${M+1}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i${M+1} + ${C})));
+          $if C + 8 == CHANNEL_TILE:
+            i${M+1} += ${CHANNEL_TILE};
+        vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vi${M}x${ABC[C:C+8]}), _MM_FROUND_NO_EXC);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      vacc${ABC[C:C+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vscale), _MM_FROUND_NO_EXC);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      __m256 vout${ABC[C:C+8]} = _mm256_max_ps(_mm256_cvtph_ps(vacc${ABC[C:C+8]}), vmin);
+
+    $for C in range(0, CHANNEL_TILE, 8):
+      vout${ABC[C:C+8]} = _mm256_min_ps(vout${ABC[C:C+8]}, vmax);
+
+    _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC));
+    $for C in range(8, CHANNEL_TILE, 8):
+      _mm_storeu_si128((__m128i*) ((uint16_t*) output + ${C}), _mm256_cvtps_ph(vout${ABC[C:C+8]}, _MM_FROUND_NO_EXC));
+    output = (uint16_t*) output + ${CHANNEL_TILE};
+  }
+  if XNN_UNLIKELY(channels != 0) {
+    ${"do " if CHANNEL_TILE > 8 else ""}{
+      $for M in range(2):
+        const __m256 vi${M}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
+        $if CHANNEL_TILE > 8:
+          i${M} += 8;
+
+      const __m256 vi2x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
+      __m128i vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(vi0x${ABC[0:8]}, vi1x${ABC[0:8]}), _MM_FROUND_NO_EXC);
+      $if CHANNEL_TILE > 8:
+        i2 += 8;
+
+      $for M in range(2, ROW_TILE):
+        $if M + 1 != ROW_TILE:
+          const __m256 vi${M+1}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M+1}));
+          $if CHANNEL_TILE > 8:
+            i${M+1} += 8;
+        vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vi${M}x${ABC[0:8]}), _MM_FROUND_NO_EXC);
+
+      vacc${ABC[0:8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vscale), _MM_FROUND_NO_EXC);
+      __m256 vout${ABC[0:8]} = _mm256_max_ps(_mm256_cvtph_ps(vacc${ABC[0:8]}), vmin);
+      vout${ABC[0:8]} = _mm256_min_ps(vout${ABC[0:8]}, vmax);
+
+      $if CHANNEL_TILE > 8:
+        if XNN_LIKELY(channels >= 8) {
+          _mm_storeu_si128((__m128i*) output, _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC));
+          output = (uint16_t*) output + 8;
+          channels -= 8;
+        } else {
+          __m128i vh${ABC[0:8]} = _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC);
+          if (channels & 4) {
+            _mm_storel_epi64((__m128i*) output, vh${ABC[0:8]});
+            output = (uint16_t*) output + 4;
+            vh${ABC[0:8]} = _mm_unpackhi_epi64(vh${ABC[0:8]}, vh${ABC[0:8]});
+          }
+          if (channels & 2) {
+            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh${ABC[0:8]});
+            output = (uint16_t*) output + 2;
+            vh${ABC[0:8]} = _mm_srli_epi64(vh${ABC[0:8]}, 32);
+          }
+          if (channels & 1) {
+            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh${ABC[0:8]}, 0);
+          }
+          channels = 0;
+        }
+      $else:
+        __m128i vh${ABC[0:8]} = _mm256_cvtps_ph(vout${ABC[0:8]}, _MM_FROUND_NO_EXC);
+        if (channels & 4) {
+          _mm_storel_epi64((__m128i*) output, vh${ABC[0:8]});
+          output = (uint16_t*) output + 4;
+          vh${ABC[0:8]} = _mm_unpackhi_epi64(vh${ABC[0:8]}, vh${ABC[0:8]});
+        }
+        if (channels & 2) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vh${ABC[0:8]});
+          output = (uint16_t*) output + 2;
+          vh${ABC[0:8]} = _mm_srli_epi64(vh${ABC[0:8]}, 32);
+        }
+        if (channels & 1) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vh${ABC[0:8]}, 0);
+        }
+    }${" while (channels != 0);" if CHANNEL_TILE > 8 else ""}
+  }
+}
diff --git a/src/xnnpack/gavgpool.h b/src/xnnpack/gavgpool.h
index a242a28..660aeac 100644
--- a/src/xnnpack/gavgpool.h
+++ b/src/xnnpack/gavgpool.h
@@ -72,6 +72,11 @@
 DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c24)
 DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c32)
 
+DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8)
+DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16)
+DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24)
+DECLARE_F16_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32)
+
 
 #define DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
   XNN_INTERNAL void fn_name(                                          \
@@ -88,6 +93,11 @@
 DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c24)
 DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c32)
 
+DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8)
+DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16)
+DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24)
+DECLARE_F16_GAVGPOOL_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32)
+
 
 #define DECLARE_QS8_GAVGPOOL_MINMAX_MULTIPASS_UKERNEL_FUNCTION(fn_name) \
   XNN_INTERNAL void fn_name(                                            \
diff --git a/test/f16-gavgpool-minmax.cc b/test/f16-gavgpool-minmax.cc
index 0a80ded..ef8b1c2 100644
--- a/test/f16-gavgpool-minmax.cc
+++ b/test/f16-gavgpool-minmax.cc
@@ -1710,3 +1710,1695 @@
     }
   }
 #endif  // XNN_ARCH_ARM64
+
+
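+// The F16C tests below mirror the NEONFP16ARITH coverage above: for each
+// unipass/multipass kernel and channel tile, channels equal to, below, above,
+// and divisible by the tile are exercised, with and without qmin/qmax clamping
+// and with a non-unit input_stride.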
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_eq_8_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(8)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_eq_8_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 1; rows < 7; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(8)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_eq_8_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(8)
+      .input_stride(11)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_eq_8_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(8)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_eq_8_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(8)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_div_8_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 16; channels < 64; channels += 8) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_div_8_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 16; channels < 64; channels += 8) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_lt_8_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_lt_8_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_lt_8_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_lt_8_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_gt_8_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_gt_8_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_gt_8_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C8, channels_gt_8_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_eq_16_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(16)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_eq_16_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 1; rows < 7; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(16)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_eq_16_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(16)
+      .input_stride(19)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_eq_16_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(16)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_eq_16_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(16)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_div_16_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 32; channels < 128; channels += 16) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_div_16_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 32; channels < 128; channels += 16) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_lt_16_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_lt_16_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_lt_16_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_lt_16_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_gt_16_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_gt_16_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_gt_16_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C16, channels_gt_16_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_eq_24_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(24)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_eq_24_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 1; rows < 7; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(24)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_eq_24_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(24)
+      .input_stride(29)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_eq_24_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(24)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_eq_24_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(24)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_div_24_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 48; channels < 192; channels += 24) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_div_24_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 48; channels < 192; channels += 24) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_lt_24_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_lt_24_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_lt_24_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_lt_24_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_gt_24_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_gt_24_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_gt_24_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C24, channels_gt_24_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_eq_32_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(32)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_eq_32_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 1; rows < 7; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(32)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_eq_32_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(32)
+      .input_stride(37)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_eq_32_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(32)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_eq_32_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(7)
+      .channels(32)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_div_32_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 64; channels < 256; channels += 32) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_div_32_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 64; channels < 256; channels += 32) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_lt_32_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_lt_32_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_lt_32_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_lt_32_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_gt_32_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_gt_32_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      for (size_t rows = 1; rows < 7; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_gt_32_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7X__F16C_C32, channels_gt_32_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(7)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(8)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_2pass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(8)
+      .input_stride(11)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(8)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(8)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(8)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_2pass_subtile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(8)
+        .input_stride(11)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(8)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_eq_8_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(8)
+        .input_stride(11)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_div_8_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 16; channels < 64; channels += 8) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_div_8_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 16; channels < 64; channels += 8) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_div_8_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 16; channels < 64; channels += 8) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_div_8_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 16; channels < 64; channels += 8) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(131)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_lt_8_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_lt_8_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_lt_8_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_lt_8_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_lt_8_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_lt_8_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 8; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(11)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_gt_8_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_gt_8_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_gt_8_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_gt_8_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_gt_8_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C8, channels_gt_8_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 9; channels < 16; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(29)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
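+  // Multipass tests for the 16-channel tile: channel counts are swept below
+  // (lt), at (eq), above (gt), and at multiples of (div) the tile width.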
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(16)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
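+  // The input stride of 19 elements (a prime no smaller than the 16-channel
+  // tile) appears chosen so that rows are non-contiguous but never overlap.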
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_2pass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(16)
+      .input_stride(19)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
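+  // qmax/qmin at 128 exercise the output clamping (min/max) paths of the
+  // microkernel against the tester's reference result.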
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(16)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(16)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(16)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_2pass_subtile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(16)
+        .input_stride(19)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(16)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_eq_16_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(16)
+        .input_stride(19)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_div_16_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 32; channels < 128; channels += 16) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_div_16_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 32; channels < 128; channels += 16) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_div_16_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 32; channels < 128; channels += 16) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_div_16_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 32; channels < 128; channels += 16) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(263)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_lt_16_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_lt_16_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_lt_16_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_lt_16_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_lt_16_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_lt_16_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 16; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(19)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_gt_16_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_gt_16_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_gt_16_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_gt_16_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_gt_16_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C16, channels_gt_16_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 17; channels < 32; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(47)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
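+  // Same test matrix as above, instantiated for the 24-channel tile.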
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(24)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_2pass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(24)
+      .input_stride(29)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(24)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(24)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(24)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_2pass_subtile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(24)
+        .input_stride(29)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(24)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_eq_24_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(24)
+        .input_stride(29)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_div_24_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 48; channels < 192; channels += 24) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_div_24_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 48; channels < 192; channels += 24) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_div_24_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 48; channels < 192; channels += 24) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_div_24_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 48; channels < 192; channels += 24) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(389)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_lt_24_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_lt_24_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_lt_24_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_lt_24_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_lt_24_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_lt_24_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 24; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(29)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_gt_24_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_gt_24_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_gt_24_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_gt_24_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_gt_24_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C24, channels_gt_24_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 25; channels < 48; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(61)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
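+  // Same test matrix as above, instantiated for the 32-channel tile.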
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(32)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_2pass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(32)
+      .input_stride(37)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(32)
+      .qmax(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    GAvgPoolMicrokernelTester()
+      .rows(14)
+      .channels(32)
+      .qmin(128)
+      .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(32)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_2pass_subtile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 8; rows < 14; rows++) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(32)
+        .input_stride(37)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(32)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_eq_32_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t rows = 14; rows <= 35; rows += 7) {
+      GAvgPoolMicrokernelTester()
+        .rows(rows)
+        .channels(32)
+        .input_stride(37)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_div_32_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 64; channels < 256; channels += 32) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_div_32_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 64; channels < 256; channels += 32) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_div_32_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 64; channels < 256; channels += 32) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_div_32_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 64; channels < 256; channels += 32) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(521)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_lt_32_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_lt_32_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_lt_32_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_lt_32_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_lt_32_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_lt_32_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 1; channels < 32; channels++) {
+      for (size_t rows = 14; rows <= 35; rows += 7) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(37)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_gt_32_2pass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_gt_32_2pass_fulltile_with_qmax) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmax(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_gt_32_2pass_fulltile_with_qmin) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      GAvgPoolMicrokernelTester()
+        .rows(14)
+        .channels(channels)
+        .qmin(128)
+        .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_gt_32_2pass_subtile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      for (size_t rows = 8; rows < 14; rows++) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_gt_32_multipass_fulltile) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+
+  TEST(F16_GAVGPOOL_MINMAX_7P7X__F16C_C32, channels_gt_32_multipass_fulltile_with_input_stride) {
+    TEST_REQUIRES_X86_F16C;
+    for (size_t channels = 33; channels < 64; channels++) {
+      for (size_t rows = 14; rows < 35; rows += 14) {
+        GAvgPoolMicrokernelTester()
+          .rows(rows)
+          .channels(channels)
+          .input_stride(79)
+          .Test(xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32, xnn_init_f16_scaleminmax_avx_params);
+      }
+    }
+  }
+#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64
diff --git a/test/f16-gavgpool-minmax.yaml b/test/f16-gavgpool-minmax.yaml
index 224fd1b..5c43ac8 100644
--- a/test/f16-gavgpool-minmax.yaml
+++ b/test/f16-gavgpool-minmax.yaml
@@ -34,3 +34,19 @@
   init: xnn_init_f16_scaleminmax_neon_params
   arch:
     - aarch64
+- name: xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c16
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c24
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c32
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c16
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c24
+  init: xnn_init_f16_scaleminmax_avx_params
+- name: xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c32
+  init: xnn_init_f16_scaleminmax_avx_params