QU8 C4 NEON Dot Product GEMM/IGEMM microkernels

- Two dot products per vector: A * W and A * zero_point.
- Unsigned dot products with two sets of accumulators.
- Zero-point accumulators are subtracted from the main accumulators once, outside the inner loop (see the scalar sketch below).
- 1x8, 4x8, 6x8, 8x8, 1x16, 4x16, 6x16, 8x16 tiles for both GEMM and IGEMM.
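
The kernels rely on the identity sum(a * (w - zero_point)) == sum(a * w) - zero_point * sum(a):
one unsigned accumulator (vpacc) collects A * W, a second (vnacc) collects A * zero_point, and a
single unsigned subtraction after the K loop, reinterpreted as signed, gives the corrected result.
A minimal scalar sketch of that scheme, with made-up values (the NEON kernels compute the same two
sums with vdotq_lane_u32 and subtract with vreinterpretq_s32_u32(vsubq_u32(vpacc, vnacc))):

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    enum { KC = 8 };  // depth, rounded up to a multiple of 4 as in the kernels
    const uint8_t a[KC] = {1, 2, 3, 4, 5, 6, 7, 8};          // unsigned activations
    const uint8_t w[KC] = {10, 20, 30, 40, 50, 60, 70, 80};  // unsigned weights
    const uint8_t kernel_zero_point = 128;

    uint32_t vpacc = 0;  // accumulates a[k] * w[k]
    uint32_t vnacc = 0;  // accumulates a[k] * kernel_zero_point
    for (int k = 0; k < KC; k++) {
      vpacc += (uint32_t) a[k] * (uint32_t) w[k];
      vnacc += (uint32_t) a[k] * (uint32_t) kernel_zero_point;
    }
    // One subtraction outside the loop; unsigned wrap-around plus the cast to
    // signed yields the correct (possibly negative) accumulator.
    const int32_t acc = (int32_t) (vpacc - vnacc);

    // Reference: apply the kernel zero point per element.
    int32_t ref = 0;
    for (int k = 0; k < KC; k++) {
      ref += (int32_t) a[k] * ((int32_t) w[k] - (int32_t) kernel_zero_point);
    }
    printf("acc=%d ref=%d\n", acc, ref);  // prints acc=-2568 ref=-2568
    return 0;
  }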

PiperOrigin-RevId: 390067497
diff --git a/BUILD.bazel b/BUILD.bazel
index 43d09eb..7331068 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -3161,6 +3161,22 @@
     "src/qs8-igemm/gen/6x16c4-minmax-rndnu-neondot.c",
     "src/qs8-igemm/gen/8x8c4-minmax-rndnu-neondot.c",
     "src/qs8-igemm/gen/8x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/1x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/1x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/4x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/4x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/6x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/6x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/8x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-gemm/gen/8x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/1x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/1x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/4x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/4x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/6x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/6x16c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/8x8c4-minmax-rndnu-neondot.c",
+    "src/qu8-igemm/gen/8x16c4-minmax-rndnu-neondot.c",
 ]
 
 PROD_SSE_MICROKERNEL_SRCS = [
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3dd4eba..911ea99 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2347,7 +2347,23 @@
   src/qs8-igemm/gen/6x8c4-minmax-rndnu-neondot.c
   src/qs8-igemm/gen/6x16c4-minmax-rndnu-neondot.c
   src/qs8-igemm/gen/8x8c4-minmax-rndnu-neondot.c
-  src/qs8-igemm/gen/8x16c4-minmax-rndnu-neondot.c)
+  src/qs8-igemm/gen/8x16c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/1x8c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/1x16c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/4x8c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/4x16c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/6x8c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/6x16c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/8x8c4-minmax-rndnu-neondot.c
+  src/qu8-gemm/gen/8x16c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/1x8c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/1x16c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/4x8c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/4x16c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/6x8c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/6x16c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/8x8c4-minmax-rndnu-neondot.c
+  src/qu8-igemm/gen/8x16c4-minmax-rndnu-neondot.c)
 
 SET(PROD_SSE_MICROKERNEL_SRCS
   src/f32-avgpool/9p8x-minmax-sse-c4.c
diff --git a/bench/qu8-gemm.cc b/bench/qu8-gemm.cc
index f1d66e3..3ee2200 100644
--- a/bench/qu8-gemm.cc
+++ b/bench/qu8-gemm.cc
@@ -299,6 +299,38 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  static void qu8_gemm_1x8c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 1, 8, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_4x8c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 4, 8, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_6x8c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 6, 8, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_8x8c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 8, 8, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_1x16c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 1, 16, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_4x16c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 4, 16, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_6x16c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 6, 16, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
+  static void qu8_gemm_8x16c4__neondot(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot,
+      xnn_init_qu8_conv_minmax_rndnu_neon_params, 8, 16, 4, 1, benchmark::utils::CheckNEONDOT);
+  }
   static void qu8_gemm_1x16__neon_mlal_lane(benchmark::State& state, const char* net) {
     GEMMBenchmark(state,
       xnn_qu8_gemm_minmax_rndnu_ukernel_1x16__neon_mlal_lane,
@@ -314,6 +346,14 @@
       benchmark::utils::CheckNEON);
   }
 
+  BENCHMARK_GEMM(qu8_gemm_1x8c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_4x8c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_6x8c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_8x8c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_1x16c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_4x16c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_6x16c4__neondot)
+  BENCHMARK_GEMM(qu8_gemm_8x16c4__neondot)
   BENCHMARK_GEMM(qu8_gemm_1x16__neon_mlal_lane)
   BENCHMARK_GEMM(qu8_gemm_4x16__neon_mlal_lane)
 #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
diff --git a/scripts/generate-qs8-gemm.sh b/scripts/generate-qs8-gemm.sh
index cca4218..3913bee 100755
--- a/scripts/generate-qs8-gemm.sh
+++ b/scripts/generate-qs8-gemm.sh
@@ -257,8 +257,10 @@
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=6  -D NR=16 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -o src/qc8-gemm/gen/6x16c4-minmax-fp32-neondot.c
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=8  -D NR=16 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -o src/qc8-gemm/gen/8x16c4-minmax-fp32-neondot.c
 
+tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=1  -D NR=8  -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -o src/qs8-gemm/gen/1x8c4-minmax-fp32-neondot.c
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=1  -D NR=8  -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -o src/qs8-gemm/gen/1x8c4-minmax-gemmlowp-neondot.c
 
+tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=1  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-gemm/gen/1x8c4-minmax-rndnu-neondot.c
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=4  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-gemm/gen/4x8c4-minmax-rndnu-neondot.c
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=6  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-gemm/gen/6x8c4-minmax-rndnu-neondot.c
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=8  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-gemm/gen/8x8c4-minmax-rndnu-neondot.c
@@ -267,6 +269,15 @@
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=6  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-gemm/gen/6x16c4-minmax-rndnu-neondot.c
 tools/xngen src/qs8-gemm/c4-neondot.c.in -D MR=8  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-gemm/gen/8x16c4-minmax-rndnu-neondot.c
 
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=1  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/1x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=4  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/4x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=6  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/6x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=8  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/8x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=1  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/1x16c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=4  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/4x16c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=6  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/6x16c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-gemm/c4-neondot.c.in -D MR=8  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-gemm/gen/8x16c4-minmax-rndnu-neondot.c
+
 ############################### AArch64 assembly ##############################
 # Cortex A53 micro-kernel
 tools/xngen src/qs8-gemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in   -D PREFETCH=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D DATATYPE=QS8 -o src/qs8-gemm/gen/4x16-minmax-gemmlowp-aarch64-neon-mlal-lane-cortex-a53.S
diff --git a/scripts/generate-qs8-igemm.sh b/scripts/generate-qs8-igemm.sh
index 369f185..1d0ebe3 100755
--- a/scripts/generate-qs8-igemm.sh
+++ b/scripts/generate-qs8-igemm.sh
@@ -285,6 +285,15 @@
 tools/xngen src/qs8-igemm/c4-neondot.c.in -D MR=6  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-igemm/gen/6x16c4-minmax-rndnu-neondot.c
 tools/xngen src/qs8-igemm/c4-neondot.c.in -D MR=8  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -o src/qs8-igemm/gen/8x16c4-minmax-rndnu-neondot.c
 
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=1  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/1x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=4  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/4x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=6  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/6x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=8  -D NR=8  -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/8x8c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=1  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/1x16c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=4  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/4x16c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=6  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/6x16c4-minmax-rndnu-neondot.c
+tools/xngen src/qu8-igemm/c4-neondot.c.in -D MR=8  -D NR=16 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D DATATYPE=QU8 -o src/qu8-igemm/gen/8x16c4-minmax-rndnu-neondot.c
+
 ############################### AArch64 assembly ##############################
 # Cortex A53 micro-kernel
 tools/xngen src/qs8-igemm/4x16-aarch64-neon-mlal-lane-cortex-a53.S.in   -D PREFETCH=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D DATATYPE=QS8 -o src/qs8-igemm/gen/4x16-minmax-gemmlowp-aarch64-neon-mlal-lane-cortex-a53.S
diff --git a/src/qu8-gemm/c4-neondot.c.in b/src/qu8-gemm/c4-neondot.c.in
new file mode 100644
index 0000000..ce81c94
--- /dev/null
+++ b/src/qu8-gemm/c4-neondot.c.in
@@ -0,0 +1,305 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$assert NR % 8 == 0
+$assert 8 <= NR <= 16
+$assert REQUANTIZATION == "RNDNU"
+$assert DATATYPE == "QU8"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+$if REQUANTIZATION == "FP32":
+  #include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+$PARAMS_STRUCT = "fp32_neonv8" if REQUANTIZATION == "FP32" else REQUANTIZATION.lower() + "_neon"
+void xnn_${DATATYPE.lower()}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  $for M in range(1, MR):
+    const uint8_t* a${M} = (const uint8_t*) ((uintptr_t) a${M-1} + a_stride);
+    uint8_t* c${M} = (uint8_t*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->${PARAMS_STRUCT}.kernel_zero_point[0]);
+
+  // Loop over groups of ${NR} columns.
+  do {
+    // Initialize accumulators with bias. ${NR} bias values are loaded from the
+    // weight matrix, at the start of the group of ${NR} columns.
+    $for N in range(0, NR, 4):
+      uint32x4_t vpacc0x${ABC[N:N+4]} = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    $for M in range(1, MR):
+      $for N in range(0, NR, 4):
+        uint32x4_t vpacc${M}x${ABC[N:N+4]} = vpacc0x${ABC[N:N+4]};
+    $for M in range(0, MR):
+      $for N in range(0, NR, 4):
+        uint32x4_t vnacc${M}x${ABC[N:N+4]} = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the ${NR} columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a ${MR}x8 block of activations.
+      $for M in range(MR):
+        const uint8x8_t va${M}x01234567 = vld1_u8(a${M}); a${M} += 8;
+
+      // Load an 8x${NR} block of weights.
+      $for K in range(0, 8, 4):
+        $for N in range(0, NR, 4):
+          const uint8x16_t vb${ABC[K:K+4]}x${ABC[N:N+4]} = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: ${MR}x8 * 8x${NR} --> ${MR}x${NR}.
+      $for K in range(0, 8, 4):
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vpacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vpacc${M}x${ABC[N:N+4]}, vb${ABC[K:K+4]}x${ABC[N:N+4]}, va${M}x01234567, ${K//4});
+            vnacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vnacc${M}x${ABC[N:N+4]}, vb_zero_point, va${M}x01234567, ${K//4});
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load a ${MR}x4 block of activations.
+      $for M in range(MR):
+        const uint8x8_t va${M}x01234567 = vld1_u8(a${M}); a${M} += 4;
+
+      // Load a 4x${NR} block of weights.
+      $for N in range(0, NR, 4):
+        const uint8x16_t vb0123x${ABC[N:N+4]} = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: ${MR}x4 * 4x${NR} --> ${MR}x${NR}.
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vpacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vpacc${M}x${ABC[N:N+4]}, vb0123x${ABC[N:N+4]}, va${M}x01234567, 0);
+          vnacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vnacc${M}x${ABC[N:N+4]}, vb_zero_point, va${M}x01234567, 0);
+    }
+
+    // Subtract zero point accumulators from the main accumulators.
+    $for M in range(0, MR):
+      $for N in range(0, NR, 4):
+        int32x4_t vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_u32(vsubq_u32(vpacc${M}x${ABC[N:N+4]}, vnacc${M}x${ABC[N:N+4]}));
+
+    $if REQUANTIZATION == "GEMMLOWP":
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_shift);
+      const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqrdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vacc${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift);
+    $elif REQUANTIZATION == "RNDNU":
+      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
+    $elif REQUANTIZATION == "FP32":
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});
+
+      const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
+#if XNN_ARCH_ARM64
+    $for M in range(MR):
+      $for N in range(0, NR, 8):
+        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          uint8x16_t vout${M}x${ABC[N:N+16]} = vqmovun_high_s16(vqmovun_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
+        $elif M % 2 == 1:
+          uint8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovun_high_s16(vqmovun_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
+        $elif M + 1 == MR:
+          uint8x8_t vout${M}x${ABC[N:N+8]} = vqmovun_s16(vacc${M}x${ABC[N:N+8]});
+#else
+    $for M in range(MR):
+      $for N in range(0, NR, 8):
+        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          uint8x16_t vout${M}x${ABC[N:N+16]} = vcombine_u8(vqmovun_s16(vacc${M}x${ABC[N:N+8]}), vqmovun_s16(vacc${M}x${ABC[N+8:N+16]}));
+        $elif M % 2 == 1:
+          uint8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_u8(vqmovun_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovun_s16(vacc${M}x${ABC[N:N+8]}));
+        $elif M + 1 == MR:
+          uint8x8_t vout${M}x${ABC[N:N+8]} = vqmovun_s16(vacc${M}x${ABC[N:N+8]});
+#endif
+    $if NR == 8 and MR == 1:
+      const uint8x8_t voutput_min = vld1_dup_u8(&params->${PARAMS_STRUCT}.output_min);
+      const uint8x8_t voutput_max = vld1_dup_u8(&params->${PARAMS_STRUCT}.output_max);
+    $else:
+      const uint8x16_t voutput_min = vld1q_dup_u8(&params->${PARAMS_STRUCT}.output_min);
+      const uint8x16_t voutput_max = vld1q_dup_u8(&params->${PARAMS_STRUCT}.output_max);
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          vout${M}x${ABC[N:N+16]} = vmaxq_u8(vout${M}x${ABC[N:N+16]}, voutput_min);
+        $elif M % 2 == 1:
+          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
+        $elif M + 1 == MR:
+          $if NR == 8 and MR == 1:
+            vout${M}x${ABC[N:N+8]} = vmax_u8(vout${M}x${ABC[N:N+8]}, voutput_min);
+          $else:
+            vout${M}x${ABC[N:N+8]} = vmax_u8(vout${M}x${ABC[N:N+8]}, vget_low_u8(voutput_min));
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          vout${M}x${ABC[N:N+16]} = vminq_u8(vout${M}x${ABC[N:N+16]}, voutput_max);
+        $elif M % 2 == 1:
+          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
+        $elif M + 1 == MR:
+          $if NR == 8 and MR == 1:
+            vout${M}x${ABC[N:N+8]} = vmin_u8(vout${M}x${ABC[N:N+8]}, voutput_max);
+          $else:
+            vout${M}x${ABC[N:N+8]} = vmin_u8(vout${M}x${ABC[N:N+8]}, vget_low_u8(voutput_max));
+
+    if (nc >= ${NR}) {
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vst1q_u8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
+          $elif M % 2 == 1:
+            vst1_u8(c${M-1} + ${N}, vget_low_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+            vst1_u8(c${M} + ${N}, vget_high_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            vst1_u8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});
+
+      $for M in range(MR):
+        c${M} = (uint8_t*) ((uintptr_t) c${M} + cn_stride);
+
+      $for M in range(MR):
+        a${M} = (const uint8_t*) ((uintptr_t) a${M} - kc);
+
+      nc -= ${NR};
+    } else {
+      $if NR == 16:
+        $for M in range(MR):
+          $if M % 2 == 1:
+            uint8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_u8(vget_low_u8(vout${M-1}x0123456789ABCDEF), vget_low_u8(vout${M}x0123456789ABCDEF));
+          $elif M + 1 == MR:
+            uint8x8_t vout${M}x01234567 = vget_low_u8(vout${M}x0123456789ABCDEF);
+        if (nc & 8) {
+          $for M in range(MR):
+            $if M % 2 == 1:
+              vst1_u8(c${M-1}, vget_low_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]})); c${M-1} += 8;
+              vst1_u8(c${M}, vget_high_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]})); c${M} += 8;
+            $elif M + 1 == MR:
+              vst1_u8(c${M}, vout${M}x${ABC[N:N+8]}); c${M} += 8;
+          $for M in range(MR):
+            $if M % 2 == 1:
+              vout${M-1}x01234567_${M}x01234567 = vcombine_u8(vget_high_u8(vout${M-1}x0123456789ABCDEF), vget_high_u8(vout${M}x0123456789ABCDEF));
+            $elif M + 1 == MR:
+              vout${M}x01234567 = vget_high_u8(vout${M}x0123456789ABCDEF);
+        }
+      if (nc & 4) {
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_u8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
+            vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_u8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
+          $elif M + 1 == MR:
+            vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_u8(vout${M}x01234567), 0); c${M} += 4;
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_u8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_u8(vout${M}x01234567, vout${M}x01234567, 4);
+      }
+      if (nc & 2) {
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_u8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
+            vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_u8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
+          $elif M + 1 == MR:
+            vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_u8(vout${M}x01234567), 0); c${M} += 2;
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_u8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_u8(vout${M}x01234567, vout${M}x01234567, 2);
+      }
+      if (nc & 1) {
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vst1q_lane_u8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
+            vst1q_lane_u8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
+          $elif M + 1 == MR:
+            vst1_lane_u8(c${M}, vout${M}x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/1x16c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/1x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..f000c4b
--- /dev/null
+++ b/src/qu8-gemm/gen/1x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,190 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 16 columns.
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the 16 columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 1x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+
+      // Load an 8x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 1x8 * 8x16 --> 1x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 1x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+
+      // Load a 4x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 1x4 * 4x16 --> 1x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+    }
+
+    // Subtract zero point accumulators from the main accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 16;
+    } else {
+      uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_u8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_u8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/1x8c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/1x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..59c64d8
--- /dev/null
+++ b/src/qu8-gemm/gen/1x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,153 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 8 columns.
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the 8 columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 1x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+
+      // Load an 8x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 1x8 * 8x8 --> 1x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 1x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+
+      // Load a 4x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 1x4 * 4x8 --> 1x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+    }
+
+    // Subtract zero point accumulators from the main accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    uint8x8_t vout0x01234567 = vqmovun_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    uint8x8_t vout0x01234567 = vqmovun_s16(vacc0x01234567);
+#endif
+    const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_u8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_u8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c0 + 0, vout0x01234567);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_u8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/4x16c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/4x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..69685df
--- /dev/null
+++ b/src/qu8-gemm/gen/4x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,407 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 16 columns.
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc1x89AB = vpacc0x89AB;
+    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x89AB = vpacc0x89AB;
+    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x89AB = vpacc0x89AB;
+    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc1xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc2xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc3xCDEF = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the 16 columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 4x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+
+      // Load an 8x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 4x8 * 8x16 --> 4x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 1);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 1);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 1);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 1);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 1);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 4x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 4;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 4;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 4;
+
+      // Load a 4x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 4x4 * 4x16 --> 4x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+    }
+
+    // Subtract zero point accumulators from the main accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x89AB));
+    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1xCDEF));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x89AB));
+    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2xCDEF));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x89AB));
+    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
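+    // Saturating-narrow to 16 bits and add the output zero point, then
+    // saturating-narrow to unsigned 8 bits. AArch64 uses the *_high narrowing
+    // forms; AArch32 combines the 64-bit halves with vcombine instead.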
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
+    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
+    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
+    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
+    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
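+      // Final 1...15 columns: pack rows in pairs and store 8, 4, 2 and 1 byte
+      // chunks according to the bits of nc.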
+      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
+      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/4x8c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/4x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..95c2e47
--- /dev/null
+++ b/src/qu8-gemm/gen/4x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,279 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
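+  // Each dot product consumes 4 K values per lane, so kc is padded up to a
+  // multiple of 4.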
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
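+  // The kernel zero point is broadcast to all 16 lanes. A second set of
+  // accumulators (vnaccMxN) collects A * zero_point with the same dot products,
+  // so that A*W - A*zero_point == A*(W - zero_point) can be formed after the loop.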
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 8 columns.
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+
+    // Inner accumulation loop over K for this group of 8 columns.
+    size_t k = kc;
+    // 2x partially unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 4x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+
+      // Load an 8x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 4x8 * 8x8 --> 4x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`.
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 4x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 4;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 4;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 4;
+
+      // Load a 4x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 4x4 * 4x8 --> 4x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+    }
+
+    // Subtract the zero-point accumulators from the main accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
+    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
+    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
+      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
+      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
+      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/6x16c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/6x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..4c2b496
--- /dev/null
+++ b/src/qu8-gemm/gen/6x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,553 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
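+  // When mr < 6 the unused row pointers alias the previous row, so those rows
+  // recompute the same result and overwrite the same output location.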
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 16 columns.
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc1x89AB = vpacc0x89AB;
+    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x89AB = vpacc0x89AB;
+    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x89AB = vpacc0x89AB;
+    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x89AB = vpacc0x89AB;
+    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x89AB = vpacc0x89AB;
+    uint32x4_t vpacc5xCDEF = vpacc0xCDEF;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc1xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc2xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc3xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc4xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc5xCDEF = vmovq_n_u32(0);
+
+    // Inner accumulation loop over K for this group of 16 columns.
+    size_t k = kc;
+    // 2x partially unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 6x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+
+      // Load an 8x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 6x8 * 8x16 --> 6x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+      vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+      vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+      vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+      vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 1);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 1);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 1);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 1);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 1);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 1);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
+      vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 1);
+      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
+      vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 1);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb4567x89AB, va5x01234567, 1);
+      vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 1);
+      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb4567xCDEF, va5x01234567, 1);
+      vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`.
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 6x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 4;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 4;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 4;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 4;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 4;
+
+      // Load a 4x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 6x4 * 4x16 --> 6x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+      vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+      vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+      vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+      vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+    }
+
+    // Subtract the zero-point accumulators from the main accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x89AB));
+    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1xCDEF));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x89AB));
+    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2xCDEF));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x89AB));
+    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3xCDEF));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x89AB));
+    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4xCDEF));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+    int32x4_t vacc5x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc5x89AB, vnacc5x89AB));
+    int32x4_t vacc5xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc5xCDEF, vnacc5xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
+    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+    vacc5x89AB = vshlq_s32(vacc5x89AB, vright_pre_shift);
+    vacc5xCDEF = vshlq_s32(vacc5xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
+    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+    vacc5x89AB = vqdmulhq_s32(vacc5x89AB, vmultiplier);
+    vacc5xCDEF = vqdmulhq_s32(vacc5xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
+    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_post_shift);
+    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
+    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
+    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
+    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
+    uint8x16_t vout5x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc5x01234567), vacc5x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
+    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
+    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
+    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
+    uint8x16_t vout5x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc5x01234567), vqmovun_s16(vacc5x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
+    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);
+    vout5x0123456789ABCDEF = vmaxq_u8(vout5x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
+    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);
+    vout5x0123456789ABCDEF = vminq_u8(vout5x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
+      vst1q_u8(c5 + 0, vout5x0123456789ABCDEF);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
+      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
+      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);
+
+      nc -= 16;
+    } else {
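+      // Fewer than 16 columns remain: store the tail in 8-, 4-, 2- and 1-column pieces.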
+      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
+      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
+      uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vget_low_u8(vout4x0123456789ABCDEF), vget_low_u8(vout5x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_u8(c4, vget_low_u8(vout4x01234567_5x01234567)); c4 += 8;
+        vst1_u8(c5, vget_high_u8(vout4x01234567_5x01234567)); c5 += 8;
+        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
+        vout4x01234567_5x01234567 = vcombine_u8(vget_high_u8(vout4x0123456789ABCDEF), vget_high_u8(vout5x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/6x8c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/6x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..1ae367d
--- /dev/null
+++ b/src/qu8-gemm/gen/6x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,365 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 8 columns.
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
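+    // Zero-point accumulators: each accumulates activation * kernel_zero_point dot
+    // products and is subtracted from the matching vpacc* accumulator after the K loop.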
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the 8 columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 6x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+
+      // Load an 8x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 6x8 * 8x8 --> 6x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 6x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 4;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 4;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 4;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 4;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 4;
+
+      // Load a 4x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 6x4 * 4x8 --> 6x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+    }
+
+    // Subtract zero point accumulators from accumulators.
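+    // Since vpacc = bias + sum(a * w) and vnacc = sum(a * kernel_zero_point), the
+    // difference equals bias + sum(a * (w - kernel_zero_point)); it may be negative,
+    // hence the reinterpretation as signed int32.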
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
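+    // Requantize (rndnu): apply the right pre-shift, the saturating doubling high
+    // multiply by the fixed-point multiplier, and the rounding right post-shift.
+    // Negative shift counts make VSHLQ/VRSHLQ shift right.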
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+
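+    // Add the output zero point with int16 saturation, then narrow to uint8 with
+    // unsigned saturation before clamping.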
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
+    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
+    uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
+    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
+    uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
+    vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
+    vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
+      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
+      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
+      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
+      vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
+      vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
+      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
+      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);
+
+      nc -= 8;
+    } else {
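+      // Fewer than 8 columns remain: store the tail in 4-, 2- and 1-column pieces.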
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/8x16c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/8x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..0e5dbe8
--- /dev/null
+++ b/src/qu8-gemm/gen/8x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,699 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+  const uint8_t* a6 = (const uint8_t*) ((uintptr_t) a5 + a_stride);
+  uint8_t* c6 = (uint8_t*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    a6 = a5;
+    c6 = c5;
+  }
+  const uint8_t* a7 = (const uint8_t*) ((uintptr_t) a6 + a_stride);
+  uint8_t* c7 = (uint8_t*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    a7 = a6;
+    c7 = c6;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 16 columns.
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc1x89AB = vpacc0x89AB;
+    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x89AB = vpacc0x89AB;
+    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x89AB = vpacc0x89AB;
+    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x89AB = vpacc0x89AB;
+    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x89AB = vpacc0x89AB;
+    uint32x4_t vpacc5xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc6x0123 = vpacc0x0123;
+    uint32x4_t vpacc6x4567 = vpacc0x4567;
+    uint32x4_t vpacc6x89AB = vpacc0x89AB;
+    uint32x4_t vpacc6xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc7x0123 = vpacc0x0123;
+    uint32x4_t vpacc7x4567 = vpacc0x4567;
+    uint32x4_t vpacc7x89AB = vpacc0x89AB;
+    uint32x4_t vpacc7xCDEF = vpacc0xCDEF;
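+    // Zero-point accumulators, subtracted from the matching vpacc* accumulators after
+    // the K loop to fold in the kernel zero point.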
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc1xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc2xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc3xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc4xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc5xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc6x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc6xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc7x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc7xCDEF = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the 16 columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load an 8x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+      const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 8;
+      const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 8;
+
+      // Load an 8x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 8x8 * 8x16 --> 8x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+      vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+      vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+      vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+      vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+      vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+      vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+      vpacc6x89AB = vdotq_lane_u32(vpacc6x89AB, vb0123x89AB, va6x01234567, 0);
+      vnacc6x89AB = vdotq_lane_u32(vnacc6x89AB, vb_zero_point, va6x01234567, 0);
+      vpacc6xCDEF = vdotq_lane_u32(vpacc6xCDEF, vb0123xCDEF, va6x01234567, 0);
+      vnacc6xCDEF = vdotq_lane_u32(vnacc6xCDEF, vb_zero_point, va6x01234567, 0);
+      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+      vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+      vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+      vpacc7x89AB = vdotq_lane_u32(vpacc7x89AB, vb0123x89AB, va7x01234567, 0);
+      vnacc7x89AB = vdotq_lane_u32(vnacc7x89AB, vb_zero_point, va7x01234567, 0);
+      vpacc7xCDEF = vdotq_lane_u32(vpacc7xCDEF, vb0123xCDEF, va7x01234567, 0);
+      vnacc7xCDEF = vdotq_lane_u32(vnacc7xCDEF, vb_zero_point, va7x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 1);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 1);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 1);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 1);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 1);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 1);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
+      vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 1);
+      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
+      vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 1);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb4567x89AB, va5x01234567, 1);
+      vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 1);
+      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb4567xCDEF, va5x01234567, 1);
+      vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 1);
+      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb4567x0123, va6x01234567, 1);
+      vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 1);
+      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb4567x4567, va6x01234567, 1);
+      vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 1);
+      vpacc6x89AB = vdotq_lane_u32(vpacc6x89AB, vb4567x89AB, va6x01234567, 1);
+      vnacc6x89AB = vdotq_lane_u32(vnacc6x89AB, vb_zero_point, va6x01234567, 1);
+      vpacc6xCDEF = vdotq_lane_u32(vpacc6xCDEF, vb4567xCDEF, va6x01234567, 1);
+      vnacc6xCDEF = vdotq_lane_u32(vnacc6xCDEF, vb_zero_point, va6x01234567, 1);
+      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb4567x0123, va7x01234567, 1);
+      vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 1);
+      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb4567x4567, va7x01234567, 1);
+      vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 1);
+      vpacc7x89AB = vdotq_lane_u32(vpacc7x89AB, vb4567x89AB, va7x01234567, 1);
+      vnacc7x89AB = vdotq_lane_u32(vnacc7x89AB, vb_zero_point, va7x01234567, 1);
+      vpacc7xCDEF = vdotq_lane_u32(vpacc7xCDEF, vb4567xCDEF, va7x01234567, 1);
+      vnacc7xCDEF = vdotq_lane_u32(vnacc7xCDEF, vb_zero_point, va7x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load an 8x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 4;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 4;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 4;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 4;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 4;
+      const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 4;
+      const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 4;
+
+      // Load a 4x16 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 8x4 * 4x16 --> 8x16.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+      vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+      vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+      vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+      vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+      vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+      vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+      vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+      vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+      vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+      vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+      vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+      vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+      vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+      vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+      vpacc6x89AB = vdotq_lane_u32(vpacc6x89AB, vb0123x89AB, va6x01234567, 0);
+      vnacc6x89AB = vdotq_lane_u32(vnacc6x89AB, vb_zero_point, va6x01234567, 0);
+      vpacc6xCDEF = vdotq_lane_u32(vpacc6xCDEF, vb0123xCDEF, va6x01234567, 0);
+      vnacc6xCDEF = vdotq_lane_u32(vnacc6xCDEF, vb_zero_point, va6x01234567, 0);
+      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+      vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+      vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+      vpacc7x89AB = vdotq_lane_u32(vpacc7x89AB, vb0123x89AB, va7x01234567, 0);
+      vnacc7x89AB = vdotq_lane_u32(vnacc7x89AB, vb_zero_point, va7x01234567, 0);
+      vpacc7xCDEF = vdotq_lane_u32(vpacc7xCDEF, vb0123xCDEF, va7x01234567, 0);
+      vnacc7xCDEF = vdotq_lane_u32(vnacc7xCDEF, vb_zero_point, va7x01234567, 0);
+    }
+
+    // Subtract zero point accumulators from accumulators.
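+    // vpacc - vnacc = bias + sum(a * (w - kernel_zero_point)); the result is
+    // reinterpreted as signed int32 since it may be negative.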
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x89AB));
+    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1xCDEF));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x89AB));
+    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2xCDEF));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x89AB));
+    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3xCDEF));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x89AB));
+    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4xCDEF));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+    int32x4_t vacc5x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc5x89AB, vnacc5x89AB));
+    int32x4_t vacc5xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc5xCDEF, vnacc5xCDEF));
+    int32x4_t vacc6x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x0123, vnacc6x0123));
+    int32x4_t vacc6x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x4567, vnacc6x4567));
+    int32x4_t vacc6x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc6x89AB, vnacc6x89AB));
+    int32x4_t vacc6xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc6xCDEF, vnacc6xCDEF));
+    int32x4_t vacc7x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x0123, vnacc7x0123));
+    int32x4_t vacc7x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x4567, vnacc7x4567));
+    int32x4_t vacc7x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc7x89AB, vnacc7x89AB));
+    int32x4_t vacc7xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc7xCDEF, vnacc7xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
+    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+    vacc5x89AB = vshlq_s32(vacc5x89AB, vright_pre_shift);
+    vacc5xCDEF = vshlq_s32(vacc5xCDEF, vright_pre_shift);
+    vacc6x0123 = vshlq_s32(vacc6x0123, vright_pre_shift);
+    vacc6x4567 = vshlq_s32(vacc6x4567, vright_pre_shift);
+    vacc6x89AB = vshlq_s32(vacc6x89AB, vright_pre_shift);
+    vacc6xCDEF = vshlq_s32(vacc6xCDEF, vright_pre_shift);
+    vacc7x0123 = vshlq_s32(vacc7x0123, vright_pre_shift);
+    vacc7x4567 = vshlq_s32(vacc7x4567, vright_pre_shift);
+    vacc7x89AB = vshlq_s32(vacc7x89AB, vright_pre_shift);
+    vacc7xCDEF = vshlq_s32(vacc7xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
+    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+    vacc5x89AB = vqdmulhq_s32(vacc5x89AB, vmultiplier);
+    vacc5xCDEF = vqdmulhq_s32(vacc5xCDEF, vmultiplier);
+    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
+    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
+    vacc6x89AB = vqdmulhq_s32(vacc6x89AB, vmultiplier);
+    vacc6xCDEF = vqdmulhq_s32(vacc6xCDEF, vmultiplier);
+    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
+    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);
+    vacc7x89AB = vqdmulhq_s32(vacc7x89AB, vmultiplier);
+    vacc7xCDEF = vqdmulhq_s32(vacc7xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
+    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_post_shift);
+    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_post_shift);
+    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
+    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
+    vacc6x89AB = vrshlq_s32(vacc6x89AB, vright_post_shift);
+    vacc6xCDEF = vrshlq_s32(vacc6xCDEF, vright_post_shift);
+    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
+    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);
+    vacc7x89AB = vrshlq_s32(vacc7x89AB, vright_post_shift);
+    vacc7xCDEF = vrshlq_s32(vacc7xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
+    const int16x8_t vacc6x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x89AB), vacc6xCDEF), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);
+    const int16x8_t vacc7x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x89AB), vacc7xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
+    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
+    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
+    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
+    uint8x16_t vout5x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc5x01234567), vacc5x89ABCDEF);
+    uint8x16_t vout6x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc6x01234567), vacc6x89ABCDEF);
+    uint8x16_t vout7x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc7x01234567), vacc7x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
+    const int16x8_t vacc6x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x89AB), vqmovn_s32(vacc6xCDEF)), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);
+    const int16x8_t vacc7x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x89AB), vqmovn_s32(vacc7xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
+    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
+    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
+    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
+    uint8x16_t vout5x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc5x01234567), vqmovun_s16(vacc5x89ABCDEF));
+    uint8x16_t vout6x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc6x01234567), vqmovun_s16(vacc6x89ABCDEF));
+    uint8x16_t vout7x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc7x01234567), vqmovun_s16(vacc7x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
+    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);
+    vout5x0123456789ABCDEF = vmaxq_u8(vout5x0123456789ABCDEF, voutput_min);
+    vout6x0123456789ABCDEF = vmaxq_u8(vout6x0123456789ABCDEF, voutput_min);
+    vout7x0123456789ABCDEF = vmaxq_u8(vout7x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
+    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);
+    vout5x0123456789ABCDEF = vminq_u8(vout5x0123456789ABCDEF, voutput_max);
+    vout6x0123456789ABCDEF = vminq_u8(vout6x0123456789ABCDEF, voutput_max);
+    vout7x0123456789ABCDEF = vminq_u8(vout7x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
+      vst1q_u8(c5 + 0, vout5x0123456789ABCDEF);
+      vst1q_u8(c6 + 0, vout6x0123456789ABCDEF);
+      vst1q_u8(c7 + 0, vout7x0123456789ABCDEF);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+      c6 = (uint8_t*) ((uintptr_t) c6 + cn_stride);
+      c7 = (uint8_t*) ((uintptr_t) c7 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
+      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
+      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);
+      a6 = (const uint8_t*) ((uintptr_t) a6 - kc);
+      a7 = (const uint8_t*) ((uintptr_t) a7 - kc);
+
+      nc -= 16;
+    } else {
+      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
+      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
+      uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vget_low_u8(vout4x0123456789ABCDEF), vget_low_u8(vout5x0123456789ABCDEF));
+      uint8x16_t vout6x01234567_7x01234567 = vcombine_u8(vget_low_u8(vout6x0123456789ABCDEF), vget_low_u8(vout7x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_u8(c4, vget_low_u8(vout4x01234567_5x01234567)); c4 += 8;
+        vst1_u8(c5, vget_high_u8(vout4x01234567_5x01234567)); c5 += 8;
+        vst1_u8(c6, vget_low_u8(vout6x01234567_7x01234567)); c6 += 8;
+        vst1_u8(c7, vget_high_u8(vout6x01234567_7x01234567)); c7 += 8;
+        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
+        vout4x01234567_5x01234567 = vcombine_u8(vget_high_u8(vout4x0123456789ABCDEF), vget_high_u8(vout5x0123456789ABCDEF));
+        vout6x01234567_7x01234567 = vcombine_u8(vget_high_u8(vout6x0123456789ABCDEF), vget_high_u8(vout7x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0); c6 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2); c7 += 4;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c6, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 0); c6 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c7, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 4); c7 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+        vst1q_lane_u8(c6, vout6x01234567_7x01234567, 0);
+        vst1q_lane_u8(c7, vout6x01234567_7x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-gemm/gen/8x8c4-minmax-rndnu-neondot.c b/src/qu8-gemm/gen/8x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..395a370
--- /dev/null
+++ b/src/qu8-gemm/gen/8x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,451 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-gemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const uint8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
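+  // KC is rounded up to a multiple of 4 because the C4 weight layout packs 4 K values per
+  // dot-product lane, so the reduction always advances in 4-byte groups.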
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  const uint8_t* a0 = a;
+  uint8_t* c0 = c;
+  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    a4 = a3;
+    c4 = c3;
+  }
+  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    a5 = a4;
+    c5 = c4;
+  }
+  const uint8_t* a6 = (const uint8_t*) ((uintptr_t) a5 + a_stride);
+  uint8_t* c6 = (uint8_t*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    a6 = a5;
+    c6 = c5;
+  }
+  const uint8_t* a7 = (const uint8_t*) ((uintptr_t) a6 + a_stride);
+  uint8_t* c7 = (uint8_t*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    a7 = a6;
+    c7 = c6;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  // Loop over groups of 8 columns.
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vpacc6x0123 = vpacc0x0123;
+    uint32x4_t vpacc6x4567 = vpacc0x4567;
+    uint32x4_t vpacc7x0123 = vpacc0x0123;
+    uint32x4_t vpacc7x4567 = vpacc0x4567;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x4567 = vmovq_n_u32(0);
+
+    // Inner accumulation loop along the 8 columns.
+    size_t k = kc;
+    // 2x partial unrolled loop to load 8 bytes at a time.
+    while (k >= 8 * sizeof(uint8_t)) {
+      // Load a 8x8 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+      const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 8;
+      const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 8;
+
+      // Load a 8x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 8x8 * 8x8 --> 8x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+      vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+      vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+      vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+      vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb4567x0123, va6x01234567, 1);
+      vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 1);
+      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb4567x4567, va6x01234567, 1);
+      vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 1);
+      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb4567x0123, va7x01234567, 1);
+      vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 1);
+      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb4567x4567, va7x01234567, 1);
+      vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 1);
+
+      k -= 8 * sizeof(uint8_t);
+    }
+    // Handle up to 4 final positions of `k`
+    if XNN_UNLIKELY(k != 0) {
+      // Load a 8x4 block of activations.
+      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 4;
+      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 4;
+      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 4;
+      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 4;
+      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 4;
+      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 4;
+      const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 4;
+      const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 4;
+
+      // Load a 4x8 block of weights.
+      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+      // Multiply-accumulate: 8x4 * 4x8 --> 8x8.
+      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+      vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+      vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+      vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+      vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+      vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+      vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+      vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+      vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+      vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+      vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+      vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+      vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+      vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+      vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+      vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+      vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+    }
+
+    // Subtract the zero-point accumulators from the accumulators.
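+    // vpacc holds bias + sum(a * w) and vnacc holds sum(a * kernel_zero_point), so the
+    // difference is bias + sum(a * (w - kernel_zero_point)), i.e. the zero-point-corrected
+    // accumulation, computed without widening the weights to signed values in the loop.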
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+    int32x4_t vacc6x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x0123, vnacc6x0123));
+    int32x4_t vacc6x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x4567, vnacc6x4567));
+    int32x4_t vacc7x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x0123, vnacc7x0123));
+    int32x4_t vacc7x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x4567, vnacc7x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
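+    // Requantize with the rndnu scheme: vshlq_s32 applies the pre-shift (a negative count
+    // shifts right), vqdmulhq_s32 scales by the Q31 fixed-point multiplier, and vrshlq_s32
+    // applies the rounding post-shift.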
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+    vacc6x0123 = vshlq_s32(vacc6x0123, vright_pre_shift);
+    vacc6x4567 = vshlq_s32(vacc6x4567, vright_pre_shift);
+    vacc7x0123 = vshlq_s32(vacc7x0123, vright_pre_shift);
+    vacc7x4567 = vshlq_s32(vacc7x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
+    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
+    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
+    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
+    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
+    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
+    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
+    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
+    uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
+    uint8x16_t vout6x01234567_7x01234567 = vqmovun_high_s16(vqmovun_s16(vacc6x01234567), vacc7x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
+    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
+    uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
+    uint8x16_t vout6x01234567_7x01234567 = vcombine_u8(vqmovun_s16(vacc6x01234567), vqmovun_s16(vacc7x01234567));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
+    vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);
+    vout6x01234567_7x01234567 = vmaxq_u8(vout6x01234567_7x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
+    vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);
+    vout6x01234567_7x01234567 = vminq_u8(vout6x01234567_7x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
+      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
+      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
+      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
+      vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
+      vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));
+      vst1_u8(c6 + 0, vget_low_u8(vout6x01234567_7x01234567));
+      vst1_u8(c7 + 0, vget_high_u8(vout6x01234567_7x01234567));
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+      c6 = (uint8_t*) ((uintptr_t) c6 + cn_stride);
+      c7 = (uint8_t*) ((uintptr_t) c7 + cn_stride);
+
+      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
+      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
+      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);
+      a6 = (const uint8_t*) ((uintptr_t) a6 - kc);
+      a7 = (const uint8_t*) ((uintptr_t) a7 - kc);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0); c6 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2); c7 += 4;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c6, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 0); c6 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c7, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 4); c7 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+        vst1q_lane_u8(c6, vout6x01234567_7x01234567, 0);
+        vst1q_lane_u8(c7, vout6x01234567_7x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-igemm/c4-neondot.c.in b/src/qu8-igemm/c4-neondot.c.in
new file mode 100644
index 0000000..75e58a9
--- /dev/null
+++ b/src/qu8-igemm/c4-neondot.c.in
@@ -0,0 +1,312 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$assert NR % 8 == 0
+$assert 8 <= NR <= 16
+$assert REQUANTIZATION == "RNDNU"
+$assert DATATYPE == "QU8"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+$if REQUANTIZATION == "FP32":
+  #include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+$PARAMS_STRUCT = "fp32_neonv8" if REQUANTIZATION == "FP32" else REQUANTIZATION.lower() + "_neon"
+void xnn_${DATATYPE.lower()}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (${MR} * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  $for M in range(1, MR):
+    uint8_t* c${M} = (uint8_t*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        c${M} = c${M-1};
+      }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->${PARAMS_STRUCT}.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. ${NR} bias values are loaded from the
+    // weight matrix, at the start of the group of ${NR} columns.
+    $for N in range(0, NR, 4):
+      uint32x4_t vpacc0x${ABC[N:N+4]} = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    $for M in range(1, MR):
+      $for N in range(0, NR, 4):
+        uint32x4_t vpacc${M}x${ABC[N:N+4]} = vpacc0x${ABC[N:N+4]};
+    $for M in range(0, MR):
+      $for N in range(0, NR, 4):
+        uint32x4_t vnacc${M}x${ABC[N:N+4]} = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      $for M in range(MR):
+        const uint8_t* restrict a${M} = a[${M}];
+        if XNN_UNPREDICTABLE(a${M} != zero) {
+          a${M} = (const uint8_t*) ((uintptr_t) a${M} + a_offset);
+        }
+      a += ${MR};
+
+      // Inner accumulation loop along the ${NR} columns.
+      size_t k = kc;
+      // 2x partial unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a ${MR}x8 block of activations.
+        $for M in range(MR):
+          const uint8x8_t va${M}x01234567 = vld1_u8(a${M}); a${M} += 8;
+
+        // Load a 8x${NR} block of weights.
+        $for K in range(0, 8, 4):
+          $for N in range(0, NR, 4):
+            const uint8x16_t vb${ABC[K:K+4]}x${ABC[N:N+4]} = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: ${MR}x8 * 8x${NR} --> ${MR}x${NR}.
+        $for K in range(0, 8, 4):
+          $for M in range(MR):
+            $for N in range(0, NR, 4):
+              vpacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vpacc${M}x${ABC[N:N+4]}, vb${ABC[K:K+4]}x${ABC[N:N+4]}, va${M}x01234567, ${K//4});
+              vnacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vnacc${M}x${ABC[N:N+4]}, vb_zero_point, va${M}x01234567, ${K//4});
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`
+      if XNN_UNLIKELY(k != 0) {
+        // Load a ${MR}x4 block of activations.
+        $for M in range(MR):
+          const uint8x8_t va${M}x01234567 = vld1_u8(a${M});
+
+        // Load a 4x${NR} block of weights.
+        $for N in range(0, NR, 4):
+          const uint8x16_t vb0123x${ABC[N:N+4]} = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: ${MR}x4 * 4x${NR} --> ${MR}x${NR}.
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vpacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vpacc${M}x${ABC[N:N+4]}, vb0123x${ABC[N:N+4]}, va${M}x01234567, 0);
+            vnacc${M}x${ABC[N:N+4]} = vdotq_lane_u32(vnacc${M}x${ABC[N:N+4]}, vb_zero_point, va${M}x01234567, 0);
+      }
+      p -= ${MR} * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract the zero-point accumulators from the accumulators.
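+    // vpacc holds bias + sum(a * w) and vnacc holds sum(a * kernel_zero_point); the
+    // difference is bias + sum(a * (w - kernel_zero_point)).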
+    $for M in range(0, MR):
+      $for N in range(0, NR, 4):
+        int32x4_t vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_u32(vsubq_u32(vpacc${M}x${ABC[N:N+4]}, vnacc${M}x${ABC[N:N+4]}));
+
+    $if REQUANTIZATION == "GEMMLOWP":
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqrdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      const int32x4_t vright_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_shift);
+      const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vacc${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift);
+    $elif REQUANTIZATION == "RNDNU":
+      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);
+
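+      // vshlq_s32 applies the pre-shift (a negative count shifts right), vqdmulhq_s32 scales
+      // by the Q31 fixed-point multiplier, and vrshlq_s32 applies the rounding post-shift.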
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
+    $elif REQUANTIZATION == "FP32":
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});
+
+      const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
+#if XNN_ARCH_ARM64
+    $for M in range(MR):
+      $for N in range(0, NR, 8):
+        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          uint8x16_t vout${M}x${ABC[N:N+16]} = vqmovun_high_s16(vqmovun_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
+        $elif M % 2 == 1:
+          uint8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovun_high_s16(vqmovun_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
+        $elif M + 1 == MR:
+          uint8x8_t vout${M}x${ABC[N:N+8]} = vqmovun_s16(vacc${M}x${ABC[N:N+8]});
+#else
+    $for M in range(MR):
+      $for N in range(0, NR, 8):
+        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          uint8x16_t vout${M}x${ABC[N:N+16]} = vcombine_u8(vqmovun_s16(vacc${M}x${ABC[N:N+8]}), vqmovun_s16(vacc${M}x${ABC[N+8:N+16]}));
+        $elif M % 2 == 1:
+          uint8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_u8(vqmovun_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovun_s16(vacc${M}x${ABC[N:N+8]}));
+        $elif M + 1 == MR:
+          uint8x8_t vout${M}x${ABC[N:N+8]} = vqmovun_s16(vacc${M}x${ABC[N:N+8]});
+#endif
+    $if NR == 8 and MR == 1:
+      const uint8x8_t voutput_min = vld1_dup_u8(&params->${PARAMS_STRUCT}.output_min);
+      const uint8x8_t voutput_max = vld1_dup_u8(&params->${PARAMS_STRUCT}.output_max);
+    $else:
+      const uint8x16_t voutput_min = vld1q_dup_u8(&params->${PARAMS_STRUCT}.output_min);
+      const uint8x16_t voutput_max = vld1q_dup_u8(&params->${PARAMS_STRUCT}.output_max);
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          vout${M}x${ABC[N:N+16]} = vmaxq_u8(vout${M}x${ABC[N:N+16]}, voutput_min);
+        $elif M % 2 == 1:
+          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
+        $elif M + 1 == MR:
+          $if NR == 8 and MR == 1:
+            vout${M}x${ABC[N:N+8]} = vmax_u8(vout${M}x${ABC[N:N+8]}, voutput_min);
+          $else:
+            vout${M}x${ABC[N:N+8]} = vmax_u8(vout${M}x${ABC[N:N+8]}, vget_low_u8(voutput_min));
+
+    $for M in range(MR):
+      $for N in range(0, NR, 16):
+        $if N + 8 < NR:
+          vout${M}x${ABC[N:N+16]} = vminq_u8(vout${M}x${ABC[N:N+16]}, voutput_max);
+        $elif M % 2 == 1:
+          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
+        $elif M + 1 == MR:
+          $if NR == 8 and MR == 1:
+            vout${M}x${ABC[N:N+8]} = vmin_u8(vout${M}x${ABC[N:N+8]}, voutput_max);
+          $else:
+            vout${M}x${ABC[N:N+8]} = vmin_u8(vout${M}x${ABC[N:N+8]}, vget_low_u8(voutput_max));
+
+    if (nc >= ${NR}) {
+      $for M in reversed(range(MR)):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vst1q_u8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
+          $elif M % 2 == 1:
+            vst1_u8(c${M} + ${N}, vget_high_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+            vst1_u8(c${M-1} + ${N}, vget_low_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            vst1_u8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});
+
+      $for M in reversed(range(MR)):
+        c${M} = (uint8_t*) ((uintptr_t) c${M} + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= ${NR};
+    } else {
+      $if NR == 16:
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            uint8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_u8(vget_low_u8(vout${M-1}x0123456789ABCDEF), vget_low_u8(vout${M}x0123456789ABCDEF));
+          $elif M + 1 == MR:
+            uint8x8_t vout${M}x01234567 = vget_low_u8(vout${M}x0123456789ABCDEF);
+        if (nc & 8) {
+          $for M in reversed(range(MR)):
+            $if M % 2 == 1:
+              vst1_u8(c${M}, vget_high_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]})); c${M} += 8;
+              vst1_u8(c${M-1}, vget_low_u8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]})); c${M-1} += 8;
+            $elif M + 1 == MR:
+              vst1_u8(c${M}, vout${M}x${ABC[N:N+8]}); c${M} += 8;
+          $for M in reversed(range(MR)):
+            $if M % 2 == 1:
+              vout${M-1}x01234567_${M}x01234567 = vcombine_u8(vget_high_u8(vout${M-1}x0123456789ABCDEF), vget_high_u8(vout${M}x0123456789ABCDEF));
+            $elif M + 1 == MR:
+              vout${M}x01234567 = vget_high_u8(vout${M}x0123456789ABCDEF);
+        }
+      if (nc & 4) {
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_u8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
+            vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_u8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
+          $elif M + 1 == MR:
+            vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_u8(vout${M}x01234567), 0); c${M} += 4;
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_u8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_u8(vout${M}x01234567, vout${M}x01234567, 4);
+      }
+      if (nc & 2) {
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_u8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
+            vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_u8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
+          $elif M + 1 == MR:
+            vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_u8(vout${M}x01234567), 0); c${M} += 2;
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_u8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_u8(vout${M}x01234567, vout${M}x01234567, 2);
+      }
+      if (nc & 1) {
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vst1q_lane_u8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
+            vst1q_lane_u8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
+          $elif M + 1 == MR:
+            vst1_lane_u8(c${M}, vout${M}x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
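
For reference, the UDOT step that both the template above and the generated kernels below rely on computes four 4-element dot products per instruction. A rough scalar model (illustrative only, not part of the patch; the helper name and array shapes are made up):

#include <stdint.h>

// Model of vdotq_lane_u32(acc, b, a, lane): for n = 0..3,
// acc[n] += dot(a[4*lane .. 4*lane+3], b[n][0..3]).
// The weights vector holds four consecutive 4-byte column groups.
static void udot_lane_model(
    uint32_t acc[4], const uint8_t a[8], const uint8_t b[4][4], int lane)
{
  for (int n = 0; n < 4; n++) {
    for (int j = 0; j < 4; j++) {
      acc[n] += (uint32_t) a[4 * lane + j] * (uint32_t) b[n][j];
    }
  }
}
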
diff --git a/src/qu8-igemm/gen/1x16c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/1x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..8e05c25
--- /dev/null
+++ b/src/qu8-igemm/gen/1x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      // Inner accumulation loop along the 16 columns.
+      size_t k = kc;
+      // 2x partial unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a 1x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+
+        // Load an 8x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 1x8 * 8x16 --> 1x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`
+      if XNN_UNLIKELY(k != 0) {
+        // Load a 1x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+
+        // Load a 4x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 1x4 * 4x16 --> 1x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract zero point accumulators from accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      uint8x8_t vout0x01234567 = vget_low_u8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_u8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_u8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_u8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
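
The vpacc/vnacc pairs above implement the zero-point correction described in the change summary: since sum_k a_k * (w_k - zp) == sum_k a_k * w_k - zp * sum_k a_k, the kernel keeps one unsigned accumulator per term and subtracts once after the k loop. A minimal scalar sketch of the same identity (not part of the patch; names are illustrative):

#include <stdint.h>
#include <stddef.h>

// Equivalent of the vpacc/vnacc scheme for a single output element:
// accumulate A*W and A*zero_point separately, subtract outside the loop.
static int32_t dot_minus_weight_zero_point(
    const uint8_t* a, const uint8_t* w, size_t kc, uint8_t kernel_zero_point)
{
  uint32_t pacc = 0;  // plays the role of vpacc*
  uint32_t nacc = 0;  // plays the role of vnacc*
  for (size_t k = 0; k < kc; k++) {
    pacc += (uint32_t) a[k] * (uint32_t) w[k];
    nacc += (uint32_t) a[k] * (uint32_t) kernel_zero_point;
  }
  // Matches vreinterpretq_s32_u32(vsubq_u32(vpacc, vnacc)).
  return (int32_t) (pacc - nacc);
}
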
diff --git a/src/qu8-igemm/gen/1x8c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/1x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..5e22bf1
--- /dev/null
+++ b/src/qu8-igemm/gen/1x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,165 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      // Inner accumulation loop along the 8 columns.
+      size_t k = kc;
+      // 2x partial unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a 1x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+
+        // Load an 8x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 1x8 * 8x8 --> 1x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`
+      if XNN_UNLIKELY(k != 0) {
+        // Load a 1x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+
+        // Load a 4x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 1x4 * 4x8 --> 1x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract zero point accumulators from accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    uint8x8_t vout0x01234567 = vqmovun_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    uint8x8_t vout0x01234567 = vqmovun_s16(vacc0x01234567);
+#endif
+    const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_u8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_u8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c0 + 0, vout0x01234567);
+
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_u8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_u8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_u8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_u8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
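
The requantization sequence shared by all of these kernels (vshlq pre-shift, vqdmulhq, vrshlq post-shift, add output zero point, clamp) is the rndnu scheme. A scalar approximation, assuming the shift counts are stored as non-positive values as in the NEON path and ignoring the INT32_MIN saturation corner case (a sketch only, not part of the patch):

#include <stdint.h>

static uint8_t requantize_rndnu_model(
    int32_t acc, int32_t right_pre_shift, int32_t multiplier,
    int32_t right_post_shift, int16_t output_zero_point,
    uint8_t output_min, uint8_t output_max)
{
  // vshlq_s32 with a negative count is an arithmetic right shift.
  int64_t v = (int64_t) acc >> -right_pre_shift;
  // vqdmulhq_s32: high 32 bits of the doubled 64-bit product.
  v = (v * (int64_t) multiplier * 2) >> 32;
  // vrshlq_s32 with a negative count is a rounding arithmetic right shift.
  const int32_t shift = -right_post_shift;
  if (shift > 0) {
    v = (v + (INT64_C(1) << (shift - 1))) >> shift;
  }
  // Add the output zero point and clamp to the quantized output range.
  v += output_zero_point;
  if (v < (int64_t) output_min) v = output_min;
  if (v > (int64_t) output_max) v = output_max;
  return (uint8_t) v;
}
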
diff --git a/src/qu8-igemm/gen/4x16c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/4x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..1323ef3
--- /dev/null
+++ b/src/qu8-igemm/gen/4x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,422 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc1x89AB = vpacc0x89AB;
+    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x89AB = vpacc0x89AB;
+    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x89AB = vpacc0x89AB;
+    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc1xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc2xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc3xCDEF = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const uint8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const uint8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const uint8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      // Inner accumulation loop along the 16 columns.
+      size_t k = kc;
+      // 2x partial unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a 4x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+
+        // Load an 8x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 4x8 * 8x16 --> 4x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 1);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 1);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 1);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 1);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 1);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`
+      if XNN_UNLIKELY(k != 0) {
+        // Load a 4x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+        const uint8x8_t va1x01234567 = vld1_u8(a1);
+        const uint8x8_t va2x01234567 = vld1_u8(a2);
+        const uint8x8_t va3x01234567 = vld1_u8(a3);
+
+        // Load a 4x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 4x4 * 4x16 --> 4x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract zero point accumulators from accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x89AB));
+    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1xCDEF));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x89AB));
+    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2xCDEF));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x89AB));
+    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
+    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
+    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
+    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
+    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
+      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
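
The nc-remainder path above always stores the widest remaining power-of-two chunk and then rotates the output vector with vext so the next chunk sits in lane 0. A single-row sketch of that pattern (illustrative only, not part of the patch):

#include <arm_neon.h>
#include <stdint.h>
#include <stddef.h>

// Store the low `nc` (1..7) bytes of vout to c, mirroring the tail logic.
static void store_tail_u8(uint8_t* c, uint8x8_t vout, size_t nc)
{
  if (nc & 4) {
    vst1_lane_u32(__builtin_assume_aligned(c, 1), vreinterpret_u32_u8(vout), 0); c += 4;
    vout = vext_u8(vout, vout, 4);  // rotate the next 4 bytes into lane 0
  }
  if (nc & 2) {
    vst1_lane_u16(__builtin_assume_aligned(c, 1), vreinterpret_u16_u8(vout), 0); c += 2;
    vout = vext_u8(vout, vout, 2);
  }
  if (nc & 1) {
    vst1_lane_u8(c, vout, 0);
  }
}
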
diff --git a/src/qu8-igemm/gen/4x8c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/4x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..fc5b08e
--- /dev/null
+++ b/src/qu8-igemm/gen/4x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,294 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const uint8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const uint8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const uint8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      // Inner accumulation loop along the 8 columns.
+      size_t k = kc;
+      // 2x partial unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a 4x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+
+        // Load an 8x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 4x8 * 8x8 --> 4x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`
+      if XNN_UNLIKELY(k != 0) {
+        // Load a 4x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+        const uint8x8_t va1x01234567 = vld1_u8(a1);
+        const uint8x8_t va2x01234567 = vld1_u8(a2);
+        const uint8x8_t va3x01234567 = vld1_u8(a3);
+
+        // Load a 4x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 4x4 * 4x8 --> 4x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract zero point accumulators from accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
+    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
+    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
+      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
+      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
+      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
+
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-igemm/gen/6x16c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/6x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..eff51d6
--- /dev/null
+++ b/src/qu8-igemm/gen/6x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,570 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
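+  // C4 microkernels consume K in groups of 4 bytes per dot product, so kc is
+  // rounded up to a multiple of 4; the packed weights are assumed to be padded
+  // to match.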
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
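+  // The kernel zero point is broadcast across all 16 byte lanes; dot products
+  // against this vector accumulate zero_point * sum(activations), which is
+  // subtracted from the main accumulators after the K loop.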
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc1x89AB = vpacc0x89AB;
+    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x89AB = vpacc0x89AB;
+    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x89AB = vpacc0x89AB;
+    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x89AB = vpacc0x89AB;
+    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x89AB = vpacc0x89AB;
+    uint32x4_t vpacc5xCDEF = vpacc0xCDEF;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc1xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc2xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc3xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc4xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc5xCDEF = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
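+      // Read the next 6 activation-row pointers from the indirection buffer.
+      // A pointer equal to `zero` selects the shared zero vector and is not
+      // adjusted by a_offset.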
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const uint8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const uint8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const uint8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      const uint8_t* restrict a4 = a[4];
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
+      }
+      const uint8_t* restrict a5 = a[5];
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const uint8_t*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      // Inner accumulation loop over K for this group of 16 columns.
+      size_t k = kc;
+      // Loop partially unrolled 2x to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a 6x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+        const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+        const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+
+        // Load an 8x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 6x8 * 8x16 --> 6x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+        vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+        vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+        vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+        vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 1);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 1);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 1);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 1);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 1);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 1);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
+        vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 1);
+        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
+        vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 1);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb4567x89AB, va5x01234567, 1);
+        vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 1);
+        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb4567xCDEF, va5x01234567, 1);
+        vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`.
+      if XNN_UNLIKELY(k != 0) {
+        // Load a 6x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+        const uint8x8_t va1x01234567 = vld1_u8(a1);
+        const uint8x8_t va2x01234567 = vld1_u8(a2);
+        const uint8x8_t va3x01234567 = vld1_u8(a3);
+        const uint8x8_t va4x01234567 = vld1_u8(a4);
+        const uint8x8_t va5x01234567 = vld1_u8(a5);
+
+        // Load a 4x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 6x4 * 4x16 --> 6x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+        vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+        vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+        vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+        vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+      }
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract the zero-point accumulators from the main accumulators:
+    // sum(a*b) - zero_point*sum(a) == sum(a*(b - zero_point)).
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x89AB));
+    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1xCDEF));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x89AB));
+    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2xCDEF));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x89AB));
+    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3xCDEF));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x89AB));
+    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4xCDEF));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+    int32x4_t vacc5x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc5x89AB, vnacc5x89AB));
+    int32x4_t vacc5xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc5xCDEF, vnacc5xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
+    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+    vacc5x89AB = vshlq_s32(vacc5x89AB, vright_pre_shift);
+    vacc5xCDEF = vshlq_s32(vacc5xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
+    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+    vacc5x89AB = vqdmulhq_s32(vacc5x89AB, vmultiplier);
+    vacc5xCDEF = vqdmulhq_s32(vacc5xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
+    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_post_shift);
+    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
+    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
+    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
+    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
+    uint8x16_t vout5x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc5x01234567), vacc5x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
+    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
+    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
+    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
+    uint8x16_t vout5x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc5x01234567), vqmovun_s16(vacc5x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
+    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);
+    vout5x0123456789ABCDEF = vmaxq_u8(vout5x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
+    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);
+    vout5x0123456789ABCDEF = vminq_u8(vout5x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c5 + 0, vout5x0123456789ABCDEF);
+      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
+      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
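+      // Fewer than 16 columns remain: pack two rows per 128-bit register
+      // (rows 0|1, 2|3, 4|5) and store progressively narrower chunks, rotating
+      // the remaining bytes down with vextq after each partial store.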
+      uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vget_low_u8(vout4x0123456789ABCDEF), vget_low_u8(vout5x0123456789ABCDEF));
+      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
+      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_u8(c5, vget_high_u8(vout4x01234567_5x01234567)); c5 += 8;
+        vst1_u8(c4, vget_low_u8(vout4x01234567_5x01234567)); c4 += 8;
+        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
+        vout4x01234567_5x01234567 = vcombine_u8(vget_high_u8(vout4x0123456789ABCDEF), vget_high_u8(vout5x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qu8-igemm/gen/6x8c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/6x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..d027ce7
--- /dev/null
+++ b/src/qu8-igemm/gen/6x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,382 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 6);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (6 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 6) {
+    c5 = c4;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const uint8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const uint8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const uint8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      const uint8_t* restrict a4 = a[4];
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
+      }
+      const uint8_t* restrict a5 = a[5];
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const uint8_t*) ((uintptr_t) a5 + a_offset);
+      }
+      a += 6;
+
+      // Inner accumulation loop over K for this group of 8 columns.
+      size_t k = kc;
+      // Loop partially unrolled 2x to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load a 6x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+        const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+        const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+
+        // Load an 8x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 6x8 * 8x8 --> 6x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`.
+      if XNN_UNLIKELY(k != 0) {
+        // Load a 6x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+        const uint8x8_t va1x01234567 = vld1_u8(a1);
+        const uint8x8_t va2x01234567 = vld1_u8(a2);
+        const uint8x8_t va3x01234567 = vld1_u8(a3);
+        const uint8x8_t va4x01234567 = vld1_u8(a4);
+        const uint8x8_t va5x01234567 = vld1_u8(a5);
+
+        // Load a 4x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 6x4 * 4x8 --> 6x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+      }
+      p -= 6 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract the zero-point accumulators from the main accumulators:
+    // sum(a*b) - zero_point*sum(a) == sum(a*(b - zero_point)).
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
+    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
+    uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
+    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
+    uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
+    vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
+    vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));
+      vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
+      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
+      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
+      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
+      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
+
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
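For reference, the store epilogue above implements the rndnu requantization: a truncating arithmetic pre-shift, a doubling high multiply by the Q31 multiplier, a rounding post-shift, addition of the output zero point, and narrowing with a clamp to [output_min, output_max]. A per-lane scalar sketch of that pipeline follows (the helper name is illustrative only, and the int32 saturation performed by vqdmulhq_s32 and the int16 saturation of vqaddq_s16 are omitted for brevity):

#include <stdint.h>

static inline uint8_t rndnu_requantize_scalar(
    int32_t acc,               // bias + A*W - A*zero_point from the K loop
    int32_t right_pre_shift,   // negative value, used by vshlq_s32 as a right shift
    int32_t multiplier,        // Q31 fixed-point multiplier for vqdmulhq_s32
    int32_t right_post_shift,  // negative value, used by vrshlq_s32 as a right shift
    int16_t output_zero_point,
    uint8_t output_min,
    uint8_t output_max)
{
  // 1. Truncating arithmetic right shift (vshlq_s32 with a negative shift count).
  acc >>= -right_pre_shift;
  // 2. Doubling multiply, keep the high 32 bits (vqdmulhq_s32, saturation omitted).
  acc = (int32_t) (((int64_t) acc * multiplier * 2) >> 32);
  // 3. Rounding arithmetic right shift (vrshlq_s32 with a negative shift count).
  const int32_t shift = -right_post_shift;
  if (shift != 0) {
    acc = (int32_t) (((int64_t) acc + ((int64_t) 1 << (shift - 1))) >> shift);
  }
  // 4. Add the output zero point, then clamp into the uint8 output range.
  acc += output_zero_point;
  if (acc < (int32_t) output_min) acc = output_min;
  if (acc > (int32_t) output_max) acc = output_max;
  return (uint8_t) acc;
}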
diff --git a/src/qu8-igemm/gen/8x16c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/8x16c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..8117ed1
--- /dev/null
+++ b/src/qu8-igemm/gen/8x16c4-minmax-rndnu-neondot.c
@@ -0,0 +1,718 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (8 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    c5 = c4;
+  }
+  uint8_t* c6 = (uint8_t*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    c6 = c5;
+  }
+  uint8_t* c7 = (uint8_t*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    c7 = c6;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 16 bias values are loaded from the
+    // weight matrix, at the start of the group of 16 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc1x89AB = vpacc0x89AB;
+    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x89AB = vpacc0x89AB;
+    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x89AB = vpacc0x89AB;
+    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x89AB = vpacc0x89AB;
+    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x89AB = vpacc0x89AB;
+    uint32x4_t vpacc5xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc6x0123 = vpacc0x0123;
+    uint32x4_t vpacc6x4567 = vpacc0x4567;
+    uint32x4_t vpacc6x89AB = vpacc0x89AB;
+    uint32x4_t vpacc6xCDEF = vpacc0xCDEF;
+    uint32x4_t vpacc7x0123 = vpacc0x0123;
+    uint32x4_t vpacc7x4567 = vpacc0x4567;
+    uint32x4_t vpacc7x89AB = vpacc0x89AB;
+    uint32x4_t vpacc7xCDEF = vpacc0xCDEF;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc0xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc1xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc2xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc3xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc4xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc5xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc6x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc6xCDEF = vmovq_n_u32(0);
+    uint32x4_t vnacc7x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x89AB = vmovq_n_u32(0);
+    uint32x4_t vnacc7xCDEF = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const uint8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const uint8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const uint8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      const uint8_t* restrict a4 = a[4];
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
+      }
+      const uint8_t* restrict a5 = a[5];
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const uint8_t*) ((uintptr_t) a5 + a_offset);
+      }
+      const uint8_t* restrict a6 = a[6];
+      if XNN_UNPREDICTABLE(a6 != zero) {
+        a6 = (const uint8_t*) ((uintptr_t) a6 + a_offset);
+      }
+      const uint8_t* restrict a7 = a[7];
+      if XNN_UNPREDICTABLE(a7 != zero) {
+        a7 = (const uint8_t*) ((uintptr_t) a7 + a_offset);
+      }
+      a += 8;
+
+      // Inner accumulation loop along the 16 columns.
+      size_t k = kc;
+      // 2x partially unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load an 8x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+        const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+        const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+        const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 8;
+        const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 8;
+
+        // Load an 8x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 8x8 * 8x16 --> 8x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+        vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+        vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+        vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+        vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+        vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+        vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+        vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+        vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+        vpacc6x89AB = vdotq_lane_u32(vpacc6x89AB, vb0123x89AB, va6x01234567, 0);
+        vnacc6x89AB = vdotq_lane_u32(vnacc6x89AB, vb_zero_point, va6x01234567, 0);
+        vpacc6xCDEF = vdotq_lane_u32(vpacc6xCDEF, vb0123xCDEF, va6x01234567, 0);
+        vnacc6xCDEF = vdotq_lane_u32(vnacc6xCDEF, vb_zero_point, va6x01234567, 0);
+        vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+        vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+        vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+        vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+        vpacc7x89AB = vdotq_lane_u32(vpacc7x89AB, vb0123x89AB, va7x01234567, 0);
+        vnacc7x89AB = vdotq_lane_u32(vnacc7x89AB, vb_zero_point, va7x01234567, 0);
+        vpacc7xCDEF = vdotq_lane_u32(vpacc7xCDEF, vb0123xCDEF, va7x01234567, 0);
+        vnacc7xCDEF = vdotq_lane_u32(vnacc7xCDEF, vb_zero_point, va7x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 1);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 1);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 1);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 1);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 1);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 1);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 1);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 1);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
+        vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 1);
+        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
+        vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 1);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb4567x89AB, va5x01234567, 1);
+        vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 1);
+        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb4567xCDEF, va5x01234567, 1);
+        vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 1);
+        vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb4567x0123, va6x01234567, 1);
+        vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 1);
+        vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb4567x4567, va6x01234567, 1);
+        vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 1);
+        vpacc6x89AB = vdotq_lane_u32(vpacc6x89AB, vb4567x89AB, va6x01234567, 1);
+        vnacc6x89AB = vdotq_lane_u32(vnacc6x89AB, vb_zero_point, va6x01234567, 1);
+        vpacc6xCDEF = vdotq_lane_u32(vpacc6xCDEF, vb4567xCDEF, va6x01234567, 1);
+        vnacc6xCDEF = vdotq_lane_u32(vnacc6xCDEF, vb_zero_point, va6x01234567, 1);
+        vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb4567x0123, va7x01234567, 1);
+        vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 1);
+        vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb4567x4567, va7x01234567, 1);
+        vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 1);
+        vpacc7x89AB = vdotq_lane_u32(vpacc7x89AB, vb4567x89AB, va7x01234567, 1);
+        vnacc7x89AB = vdotq_lane_u32(vnacc7x89AB, vb_zero_point, va7x01234567, 1);
+        vpacc7xCDEF = vdotq_lane_u32(vpacc7xCDEF, vb4567xCDEF, va7x01234567, 1);
+        vnacc7xCDEF = vdotq_lane_u32(vnacc7xCDEF, vb_zero_point, va7x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`
+      if XNN_UNLIKELY(k != 0) {
+        // Load an 8x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+        const uint8x8_t va1x01234567 = vld1_u8(a1);
+        const uint8x8_t va2x01234567 = vld1_u8(a2);
+        const uint8x8_t va3x01234567 = vld1_u8(a3);
+        const uint8x8_t va4x01234567 = vld1_u8(a4);
+        const uint8x8_t va5x01234567 = vld1_u8(a5);
+        const uint8x8_t va6x01234567 = vld1_u8(a6);
+        const uint8x8_t va7x01234567 = vld1_u8(a7);
+
+        // Load a 4x16 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 8x4 * 4x16 --> 8x16.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
+        vnacc0x89AB = vdotq_lane_u32(vnacc0x89AB, vb_zero_point, va0x01234567, 0);
+        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
+        vnacc0xCDEF = vdotq_lane_u32(vnacc0xCDEF, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
+        vnacc1x89AB = vdotq_lane_u32(vnacc1x89AB, vb_zero_point, va1x01234567, 0);
+        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
+        vnacc1xCDEF = vdotq_lane_u32(vnacc1xCDEF, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
+        vnacc2x89AB = vdotq_lane_u32(vnacc2x89AB, vb_zero_point, va2x01234567, 0);
+        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
+        vnacc2xCDEF = vdotq_lane_u32(vnacc2xCDEF, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
+        vnacc3x89AB = vdotq_lane_u32(vnacc3x89AB, vb_zero_point, va3x01234567, 0);
+        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
+        vnacc3xCDEF = vdotq_lane_u32(vnacc3xCDEF, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
+        vnacc4x89AB = vdotq_lane_u32(vnacc4x89AB, vb_zero_point, va4x01234567, 0);
+        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
+        vnacc4xCDEF = vdotq_lane_u32(vnacc4xCDEF, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
+        vnacc5x89AB = vdotq_lane_u32(vnacc5x89AB, vb_zero_point, va5x01234567, 0);
+        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
+        vnacc5xCDEF = vdotq_lane_u32(vnacc5xCDEF, vb_zero_point, va5x01234567, 0);
+        vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+        vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+        vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+        vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+        vpacc6x89AB = vdotq_lane_u32(vpacc6x89AB, vb0123x89AB, va6x01234567, 0);
+        vnacc6x89AB = vdotq_lane_u32(vnacc6x89AB, vb_zero_point, va6x01234567, 0);
+        vpacc6xCDEF = vdotq_lane_u32(vpacc6xCDEF, vb0123xCDEF, va6x01234567, 0);
+        vnacc6xCDEF = vdotq_lane_u32(vnacc6xCDEF, vb_zero_point, va6x01234567, 0);
+        vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+        vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+        vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+        vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+        vpacc7x89AB = vdotq_lane_u32(vpacc7x89AB, vb0123x89AB, va7x01234567, 0);
+        vnacc7x89AB = vdotq_lane_u32(vnacc7x89AB, vb_zero_point, va7x01234567, 0);
+        vpacc7xCDEF = vdotq_lane_u32(vpacc7xCDEF, vb0123xCDEF, va7x01234567, 0);
+        vnacc7xCDEF = vdotq_lane_u32(vnacc7xCDEF, vb_zero_point, va7x01234567, 0);
+      }
+      p -= 8 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract the zero-point accumulators from the accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x89AB));
+    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0xCDEF));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x89AB));
+    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1xCDEF));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x89AB));
+    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2xCDEF));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x89AB));
+    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3xCDEF));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x89AB));
+    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4xCDEF));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+    int32x4_t vacc5x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc5x89AB, vnacc5x89AB));
+    int32x4_t vacc5xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc5xCDEF, vnacc5xCDEF));
+    int32x4_t vacc6x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x0123, vnacc6x0123));
+    int32x4_t vacc6x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x4567, vnacc6x4567));
+    int32x4_t vacc6x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc6x89AB, vnacc6x89AB));
+    int32x4_t vacc6xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc6xCDEF, vnacc6xCDEF));
+    int32x4_t vacc7x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x0123, vnacc7x0123));
+    int32x4_t vacc7x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x4567, vnacc7x4567));
+    int32x4_t vacc7x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc7x89AB, vnacc7x89AB));
+    int32x4_t vacc7xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc7xCDEF, vnacc7xCDEF));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
+    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+    vacc5x89AB = vshlq_s32(vacc5x89AB, vright_pre_shift);
+    vacc5xCDEF = vshlq_s32(vacc5xCDEF, vright_pre_shift);
+    vacc6x0123 = vshlq_s32(vacc6x0123, vright_pre_shift);
+    vacc6x4567 = vshlq_s32(vacc6x4567, vright_pre_shift);
+    vacc6x89AB = vshlq_s32(vacc6x89AB, vright_pre_shift);
+    vacc6xCDEF = vshlq_s32(vacc6xCDEF, vright_pre_shift);
+    vacc7x0123 = vshlq_s32(vacc7x0123, vright_pre_shift);
+    vacc7x4567 = vshlq_s32(vacc7x4567, vright_pre_shift);
+    vacc7x89AB = vshlq_s32(vacc7x89AB, vright_pre_shift);
+    vacc7xCDEF = vshlq_s32(vacc7xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
+    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+    vacc5x89AB = vqdmulhq_s32(vacc5x89AB, vmultiplier);
+    vacc5xCDEF = vqdmulhq_s32(vacc5xCDEF, vmultiplier);
+    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
+    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
+    vacc6x89AB = vqdmulhq_s32(vacc6x89AB, vmultiplier);
+    vacc6xCDEF = vqdmulhq_s32(vacc6xCDEF, vmultiplier);
+    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
+    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);
+    vacc7x89AB = vqdmulhq_s32(vacc7x89AB, vmultiplier);
+    vacc7xCDEF = vqdmulhq_s32(vacc7xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
+    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_post_shift);
+    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_post_shift);
+    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
+    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
+    vacc6x89AB = vrshlq_s32(vacc6x89AB, vright_post_shift);
+    vacc6xCDEF = vrshlq_s32(vacc6xCDEF, vright_post_shift);
+    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
+    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);
+    vacc7x89AB = vrshlq_s32(vacc7x89AB, vright_post_shift);
+    vacc7xCDEF = vrshlq_s32(vacc7xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
+    const int16x8_t vacc6x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x89AB), vacc6xCDEF), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);
+    const int16x8_t vacc7x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x89AB), vacc7xCDEF), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
+    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
+    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
+    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
+    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
+    uint8x16_t vout5x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc5x01234567), vacc5x89ABCDEF);
+    uint8x16_t vout6x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc6x01234567), vacc6x89ABCDEF);
+    uint8x16_t vout7x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc7x01234567), vacc7x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
+    const int16x8_t vacc6x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x89AB), vqmovn_s32(vacc6xCDEF)), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);
+    const int16x8_t vacc7x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x89AB), vqmovn_s32(vacc7xCDEF)), voutput_zero_point);
+
+    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
+    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
+    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
+    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
+    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
+    uint8x16_t vout5x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc5x01234567), vqmovun_s16(vacc5x89ABCDEF));
+    uint8x16_t vout6x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc6x01234567), vqmovun_s16(vacc6x89ABCDEF));
+    uint8x16_t vout7x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc7x01234567), vqmovun_s16(vacc7x89ABCDEF));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
+    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);
+    vout5x0123456789ABCDEF = vmaxq_u8(vout5x0123456789ABCDEF, voutput_min);
+    vout6x0123456789ABCDEF = vmaxq_u8(vout6x0123456789ABCDEF, voutput_min);
+    vout7x0123456789ABCDEF = vmaxq_u8(vout7x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
+    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);
+    vout5x0123456789ABCDEF = vminq_u8(vout5x0123456789ABCDEF, voutput_max);
+    vout6x0123456789ABCDEF = vminq_u8(vout6x0123456789ABCDEF, voutput_max);
+    vout7x0123456789ABCDEF = vminq_u8(vout7x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_u8(c7 + 0, vout7x0123456789ABCDEF);
+      vst1q_u8(c6 + 0, vout6x0123456789ABCDEF);
+      vst1q_u8(c5 + 0, vout5x0123456789ABCDEF);
+      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
+      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c7 = (uint8_t*) ((uintptr_t) c7 + cn_stride);
+      c6 = (uint8_t*) ((uintptr_t) c6 + cn_stride);
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      uint8x16_t vout6x01234567_7x01234567 = vcombine_u8(vget_low_u8(vout6x0123456789ABCDEF), vget_low_u8(vout7x0123456789ABCDEF));
+      uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vget_low_u8(vout4x0123456789ABCDEF), vget_low_u8(vout5x0123456789ABCDEF));
+      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
+      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_u8(c7, vget_high_u8(vout6x01234567_7x01234567)); c7 += 8;
+        vst1_u8(c6, vget_low_u8(vout6x01234567_7x01234567)); c6 += 8;
+        vst1_u8(c5, vget_high_u8(vout4x01234567_5x01234567)); c5 += 8;
+        vst1_u8(c4, vget_low_u8(vout4x01234567_5x01234567)); c4 += 8;
+        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
+        vout6x01234567_7x01234567 = vcombine_u8(vget_high_u8(vout6x0123456789ABCDEF), vget_high_u8(vout7x0123456789ABCDEF));
+        vout4x01234567_5x01234567 = vcombine_u8(vget_high_u8(vout4x0123456789ABCDEF), vget_high_u8(vout5x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2); c7 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0); c6 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c7, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 4); c7 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c6, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 0); c6 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c7, vout6x01234567_7x01234567, 8);
+        vst1q_lane_u8(c6, vout6x01234567_7x01234567, 0);
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
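The loop body above keeps two unsigned accumulator sets per tile: vpacc* accumulates UDOT of the activations against the packed weights, while vnacc* accumulates UDOT of the same activations against the broadcast vb_zero_point; the single subtraction after the K loop converts the unsigned results into the signed A*(W - zero_point) values that feed requantization. A minimal scalar sketch of that identity, with illustrative names:

#include <stddef.h>
#include <stdint.h>

// For one output element: sum_k a[k] * (w[k] - kernel_zero_point)
//   = sum_k a[k] * w[k]  -  kernel_zero_point * sum_k a[k]
// Both sums stay unsigned during the loop; the signed result falls out of the
// wrapping unsigned subtraction reinterpreted as int32.
static inline int32_t qu8_dot_minus_zero_point(
    const uint8_t* a, const uint8_t* w, size_t kc,
    uint8_t kernel_zero_point, uint32_t bias)
{
  uint32_t pacc = bias;   // positive accumulator, seeded with the bias (vpacc*)
  uint32_t nacc = 0;      // zero-point accumulator (vnacc*)
  for (size_t k = 0; k < kc; k++) {
    pacc += (uint32_t) a[k] * (uint32_t) w[k];               // A * W
    nacc += (uint32_t) a[k] * (uint32_t) kernel_zero_point;  // A * zero_point
  }
  return (int32_t) (pacc - nacc);  // subtraction happens once, outside the loop
}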
diff --git a/src/qu8-igemm/gen/8x8c4-minmax-rndnu-neondot.c b/src/qu8-igemm/gen/8x8c4-minmax-rndnu-neondot.c
new file mode 100644
index 0000000..5d0fe61
--- /dev/null
+++ b/src/qu8-igemm/gen/8x8c4-minmax-rndnu-neondot.c
@@ -0,0 +1,470 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qu8-igemm/c4-neondot.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/igemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const uint8_t** restrict a,
+    const void* restrict w,
+    uint8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 8);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (8 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(uint8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
+  uint8_t* c0 = c;
+  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 4) {
+    c3 = c2;
+  }
+  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 4) {
+    c4 = c3;
+  }
+  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 6) {
+    c5 = c4;
+  }
+  uint8_t* c6 = (uint8_t*) ((uintptr_t) c5 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 6) {
+    c6 = c5;
+  }
+  uint8_t* c7 = (uint8_t*) ((uintptr_t) c6 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 8) {
+    c7 = c6;
+  }
+
+  const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
+
+  do {
+    // Initialize accumulators with bias. 8 bias values are loaded from the
+    // weight matrix, at the start of the group of 8 columns.
+    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
+    uint32x4_t vpacc1x0123 = vpacc0x0123;
+    uint32x4_t vpacc1x4567 = vpacc0x4567;
+    uint32x4_t vpacc2x0123 = vpacc0x0123;
+    uint32x4_t vpacc2x4567 = vpacc0x4567;
+    uint32x4_t vpacc3x0123 = vpacc0x0123;
+    uint32x4_t vpacc3x4567 = vpacc0x4567;
+    uint32x4_t vpacc4x0123 = vpacc0x0123;
+    uint32x4_t vpacc4x4567 = vpacc0x4567;
+    uint32x4_t vpacc5x0123 = vpacc0x0123;
+    uint32x4_t vpacc5x4567 = vpacc0x4567;
+    uint32x4_t vpacc6x0123 = vpacc0x0123;
+    uint32x4_t vpacc6x4567 = vpacc0x4567;
+    uint32x4_t vpacc7x0123 = vpacc0x0123;
+    uint32x4_t vpacc7x4567 = vpacc0x4567;
+    uint32x4_t vnacc0x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc0x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc1x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc2x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc3x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc4x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc5x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc6x4567 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x0123 = vmovq_n_u32(0);
+    uint32x4_t vnacc7x4567 = vmovq_n_u32(0);
+
+    size_t p = ks;
+    do {
+      const uint8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const uint8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const uint8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const uint8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      const uint8_t* restrict a4 = a[4];
+      if XNN_UNPREDICTABLE(a4 != zero) {
+        a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
+      }
+      const uint8_t* restrict a5 = a[5];
+      if XNN_UNPREDICTABLE(a5 != zero) {
+        a5 = (const uint8_t*) ((uintptr_t) a5 + a_offset);
+      }
+      const uint8_t* restrict a6 = a[6];
+      if XNN_UNPREDICTABLE(a6 != zero) {
+        a6 = (const uint8_t*) ((uintptr_t) a6 + a_offset);
+      }
+      const uint8_t* restrict a7 = a[7];
+      if XNN_UNPREDICTABLE(a7 != zero) {
+        a7 = (const uint8_t*) ((uintptr_t) a7 + a_offset);
+      }
+      a += 8;
+
+      // Inner accumulation loop along the 8 columns.
+      size_t k = kc;
+      // 2x partially unrolled loop to load 8 bytes at a time.
+      while (k >= 8 * sizeof(uint8_t)) {
+        // Load an 8x8 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
+        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
+        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
+        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
+        const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
+        const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
+        const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 8;
+        const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 8;
+
+        // Load an 8x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 8x8 * 8x8 --> 8x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+        vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+        vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+        vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+        vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+        vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+        vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+        vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 1);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 1);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 1);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 1);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 1);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 1);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 1);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 1);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 1);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 1);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 1);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 1);
+        vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb4567x0123, va6x01234567, 1);
+        vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 1);
+        vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb4567x4567, va6x01234567, 1);
+        vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 1);
+        vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb4567x0123, va7x01234567, 1);
+        vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 1);
+        vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb4567x4567, va7x01234567, 1);
+        vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 1);
+
+        k -= 8 * sizeof(uint8_t);
+      }
+      // Handle up to 4 final positions of `k`.
+      if XNN_UNLIKELY(k != 0) {
+        // Load an 8x4 block of activations.
+        const uint8x8_t va0x01234567 = vld1_u8(a0);
+        const uint8x8_t va1x01234567 = vld1_u8(a1);
+        const uint8x8_t va2x01234567 = vld1_u8(a2);
+        const uint8x8_t va3x01234567 = vld1_u8(a3);
+        const uint8x8_t va4x01234567 = vld1_u8(a4);
+        const uint8x8_t va5x01234567 = vld1_u8(a5);
+        const uint8x8_t va6x01234567 = vld1_u8(a6);
+        const uint8x8_t va7x01234567 = vld1_u8(a7);
+
+        // Load a 4x8 block of weights.
+        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
+
+        // Multiply-accumulate: 8x4 * 4x8 --> 8x8.
+        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
+        vnacc0x0123 = vdotq_lane_u32(vnacc0x0123, vb_zero_point, va0x01234567, 0);
+        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
+        vnacc0x4567 = vdotq_lane_u32(vnacc0x4567, vb_zero_point, va0x01234567, 0);
+        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
+        vnacc1x0123 = vdotq_lane_u32(vnacc1x0123, vb_zero_point, va1x01234567, 0);
+        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
+        vnacc1x4567 = vdotq_lane_u32(vnacc1x4567, vb_zero_point, va1x01234567, 0);
+        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
+        vnacc2x0123 = vdotq_lane_u32(vnacc2x0123, vb_zero_point, va2x01234567, 0);
+        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
+        vnacc2x4567 = vdotq_lane_u32(vnacc2x4567, vb_zero_point, va2x01234567, 0);
+        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
+        vnacc3x0123 = vdotq_lane_u32(vnacc3x0123, vb_zero_point, va3x01234567, 0);
+        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
+        vnacc3x4567 = vdotq_lane_u32(vnacc3x4567, vb_zero_point, va3x01234567, 0);
+        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
+        vnacc4x0123 = vdotq_lane_u32(vnacc4x0123, vb_zero_point, va4x01234567, 0);
+        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
+        vnacc4x4567 = vdotq_lane_u32(vnacc4x4567, vb_zero_point, va4x01234567, 0);
+        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
+        vnacc5x0123 = vdotq_lane_u32(vnacc5x0123, vb_zero_point, va5x01234567, 0);
+        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
+        vnacc5x4567 = vdotq_lane_u32(vnacc5x4567, vb_zero_point, va5x01234567, 0);
+        vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
+        vnacc6x0123 = vdotq_lane_u32(vnacc6x0123, vb_zero_point, va6x01234567, 0);
+        vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
+        vnacc6x4567 = vdotq_lane_u32(vnacc6x4567, vb_zero_point, va6x01234567, 0);
+        vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
+        vnacc7x0123 = vdotq_lane_u32(vnacc7x0123, vb_zero_point, va7x01234567, 0);
+        vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
+        vnacc7x4567 = vdotq_lane_u32(vnacc7x4567, vb_zero_point, va7x01234567, 0);
+      }
+      p -= 8 * sizeof(void*);
+    } while (p != 0);
+
+    // Subtract the zero-point accumulators from the accumulators.
+    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
+    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x4567));
+    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
+    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x4567));
+    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
+    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x4567));
+    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
+    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x4567));
+    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
+    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x4567));
+    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
+    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x4567));
+    int32x4_t vacc6x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x0123, vnacc6x0123));
+    int32x4_t vacc6x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x4567, vnacc6x4567));
+    int32x4_t vacc7x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x0123, vnacc7x0123));
+    int32x4_t vacc7x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x4567, vnacc7x4567));
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
+    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
+    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
+    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
+    vacc6x0123 = vshlq_s32(vacc6x0123, vright_pre_shift);
+    vacc6x4567 = vshlq_s32(vacc6x4567, vright_pre_shift);
+    vacc7x0123 = vshlq_s32(vacc7x0123, vright_pre_shift);
+    vacc7x4567 = vshlq_s32(vacc7x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
+    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
+    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
+    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
+    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
+    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
+    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
+    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
+    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
+    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
+    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
+    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
+    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
+    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
+    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
+    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
+    uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
+    uint8x16_t vout6x01234567_7x01234567 = vqmovun_high_s16(vqmovun_s16(vacc6x01234567), vacc7x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
+    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
+    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
+    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);
+
+    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
+    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
+    uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
+    uint8x16_t vout6x01234567_7x01234567 = vcombine_u8(vqmovun_s16(vacc6x01234567), vqmovun_s16(vacc7x01234567));
+#endif
+    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
+    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
+    vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);
+    vout6x01234567_7x01234567 = vmaxq_u8(vout6x01234567_7x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
+    vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);
+    vout6x01234567_7x01234567 = vminq_u8(vout6x01234567_7x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_u8(c7 + 0, vget_high_u8(vout6x01234567_7x01234567));
+      vst1_u8(c6 + 0, vget_low_u8(vout6x01234567_7x01234567));
+      vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));
+      vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
+      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
+      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
+      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
+      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
+
+      c7 = (uint8_t*) ((uintptr_t) c7 + cn_stride);
+      c6 = (uint8_t*) ((uintptr_t) c6 + cn_stride);
+      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
+      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
+      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const uint8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2); c7 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0); c6 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c7, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 4); c7 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c6, 1), vreinterpretq_u16_u8(vout6x01234567_7x01234567), 0); c6 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c5, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c4, 1), vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
+        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_u8(c7, vout6x01234567_7x01234567, 8);
+        vst1q_lane_u8(c6, vout6x01234567_7x01234567, 0);
+        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
+        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
+        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
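The subtraction right after the accumulation loop above is the core of the unsigned trick: each output element keeps one uint32 accumulator for A*W and one for A*zero_point, and their difference equals the signed product A*(W - zero_point). A minimal scalar sketch of that equivalence follows (illustration only, not part of the patch; `a`, `w`, `kc`, and `kernel_zero_point` are hypothetical stand-ins for one activation row / weight column pair):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Reference for a single output element of the QU8 C4 dot-product kernels:
// two unsigned accumulators inside the loop, one subtraction outside it.
static int32_t qu8_dot_ref(const uint8_t* a, const uint8_t* w, size_t kc,
                           uint8_t kernel_zero_point) {
  uint32_t pacc = 0;  // A * W           (vpacc* in the kernel)
  uint32_t nacc = 0;  // A * zero_point  (vnacc* in the kernel)
  for (size_t k = 0; k < kc; k++) {
    pacc += (uint32_t) a[k] * (uint32_t) w[k];
    nacc += (uint32_t) a[k] * (uint32_t) kernel_zero_point;
  }
  // Same wrap-around arithmetic as vsubq_u32 followed by vreinterpretq_s32_u32.
  const int32_t acc = (int32_t) (pacc - nacc);

  // Equivalent signed formulation, accumulated directly; for realistic kc the
  // sum stays well inside the int32 range.
  int32_t ref = 0;
  for (size_t k = 0; k < kc; k++) {
    ref += (int32_t) a[k] * ((int32_t) w[k] - (int32_t) kernel_zero_point);
  }
  assert(acc == ref);
  return acc;
}

The int32 difference then goes through the rndnu requantization shown in the kernel body above: pre-shift, saturating doubling multiply-high, rounding post-shift, output zero point, and the min/max clamp.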
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index dfd8837..ef82976 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -516,6 +516,15 @@
 DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__aarch64_neon_mlal_lane_cortex_a75)
 DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__aarch64_neon_mlal_lane_prfm_cortex_a75)
 
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot)
+DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot)
+
 DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_gemmlowp_ukernel_4x4c2__sse2_ld64)
 DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_gemmlowp_ukernel_4x4c2__ssse3_ld64)
 DECLARE_QU8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_gemm_minmax_gemmlowp_ukernel_4x4c2__sse41_ld64)
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 0e8148d..0f5aaee 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -325,6 +325,15 @@
 DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__aarch64_neon_mlal_lane_cortex_a75)
 DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__aarch64_neon_mlal_lane_prfm_cortex_a75)
 
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot)
+DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot)
+
 DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_gemmlowp_ukernel_4x4c2__sse2_ld64)
 DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_gemmlowp_ukernel_4x4c2__ssse3_ld64)
 DECLARE_QU8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qu8_igemm_minmax_gemmlowp_ukernel_4x4c2__sse41_ld64)
diff --git a/test/qu8-gemm-minmax-rndnu.cc b/test/qu8-gemm-minmax-rndnu.cc
index f0f3ac0..f1d3329 100644
--- a/test/qu8-gemm-minmax-rndnu.cc
+++ b/test/qu8-gemm-minmax-rndnu.cc
@@ -22,6 +22,4046 @@
 #include "gemm-microkernel-tester.h"
 
 
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
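+  // The cases below follow the same generated template as the existing QS8 neondot
+  // GEMM tests. mr()/nr() give the microkernel's output tile (6x8 here), kr(4)
+  // matches the 4 K-elements consumed by each NEON dot product in the c4 layout,
+  // and sr(1) leaves the packing un-shuffled. a_stride(), cn_stride() and
+  // cm_stride() exercise non-contiguous A and C layouts, qmin()/qmax() exercise
+  // output clamping, and a_zero_point(0)/b_zero_point(0) cover the kernels'
+  // zero-point handling. The trailing Test() arguments supply the scalar rndnu
+  // requantization reference the tester uses to compute expected outputs.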
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
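+  // For this 1xN tile the generated subtile loops over m collapse to a single
+  // iteration (m <= 1), so the subtile cases below effectively only vary n and k.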
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_1X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_4X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_6X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_GEMM_MINMAX_RNDNU_8X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
 #if XNN_ARCH_ARM64
   TEST(QU8_GEMM_MINMAX_RNDNU_4X16__AARCH64_NEON_MLAL_LANE_PRFM_CORTEX_A75, k_eq_8) {
     TEST_REQUIRES_ARM_NEON;
diff --git a/test/qu8-gemm-minmax-rndnu.yaml b/test/qu8-gemm-minmax-rndnu.yaml
index 59c1423..c42833d 100644
--- a/test/qu8-gemm-minmax-rndnu.yaml
+++ b/test/qu8-gemm-minmax-rndnu.yaml
@@ -3,6 +3,30 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_1x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_4x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_1x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_gemm_minmax_rndnu_ukernel_8x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
 - name: xnn_qu8_gemm_minmax_rndnu_ukernel_4x16__aarch64_neon_mlal_lane_prfm_cortex_a75
   init: xnn_init_qu8_conv_minmax_rndnu_neon_params
   k-block: 8
diff --git a/test/qu8-igemm-minmax-rndnu.cc b/test/qu8-igemm-minmax-rndnu.cc
index 0ef1076..24438a7 100644
--- a/test/qu8-igemm-minmax-rndnu.cc
+++ b/test/qu8-igemm-minmax-rndnu.cc
@@ -22,6 +22,4142 @@
 #include "gemm-microkernel-tester.h"
 
 
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 6; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(8)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 8; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(8)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(8)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X8C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(8)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(8)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
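+  // The a_offset and zero cases exercise IGEMM-specific indirection handling:
+  // with ks(3) each output pixel reads through 3 indirection pointers,
+  // a_offset() adds a byte offset to every indirection pointer except the
+  // shared zero buffer, and zero_index() cycles the zero buffer through each
+  // row so the test can check that pointers equal to `zero` are left unshifted.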
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
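+  // qmin(128) / qmax(128) place the activation bound at the midpoint of the
+  // uint8 range, so roughly half of the outputs should saturate, exercising
+  // the min/max clamping path.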
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_1X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_4X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 6; m++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 6; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(6)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_6X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 8; m++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 8; m++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 8; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(8)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 8; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(8)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(8)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(8)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(8)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, no_a_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, no_b_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+
+  TEST(QU8_IGEMM_MINMAX_RNDNU_8X16C4__NEONDOT, no_zero_point) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(8)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(8)
+        .n(16)
+        .k(k)
+        .a_zero_point(0)
+        .b_zero_point(0)
+        .Test(xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot, xnn_init_qu8_conv_minmax_rndnu_neon_params, xnn_init_qu8_requantization_rndnu_params, xnn_qu8_requantize_rndnu);
+    }
+  }
+#endif  // XNN_ARCH_ARM && !XNN_PLATFORM_IOS || XNN_ARCH_ARM64
+
+
 #if XNN_ARCH_ARM64
   TEST(QU8_IGEMM_MINMAX_RNDNU_4X16__AARCH64_NEON_MLAL_LANE_PRFM_CORTEX_A75, k_eq_8) {
     TEST_REQUIRES_ARM_NEON;
diff --git a/test/qu8-igemm-minmax-rndnu.yaml b/test/qu8-igemm-minmax-rndnu.yaml
index d1ba010..1b75977 100644
--- a/test/qu8-igemm-minmax-rndnu.yaml
+++ b/test/qu8-igemm-minmax-rndnu.yaml
@@ -3,6 +3,30 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_1x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_8x8c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_1x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qu8_igemm_minmax_rndnu_ukernel_8x16c4__neondot
+  init: xnn_init_qu8_conv_minmax_rndnu_neon_params
+  k-block: 8
 - name: xnn_qu8_igemm_minmax_rndnu_ukernel_4x16__aarch64_neon_mlal_lane_prfm_cortex_a75
   init: xnn_init_qu8_conv_minmax_rndnu_neon_params
   k-block: 8