Add LD1R and LD2R variants of the c2 microkernel

- Instead of 1 LD1 followed by 4 DUPs, load the activations with 4 LD1R or 2 LD2R instructions

PiperOrigin-RevId: 410613731
diff --git a/src/qs8-igemm/c2-neon-mull-dup.c.in b/src/qs8-igemm/c2-neon-mull-dup.c.in
index 9b9cb02..b6d95ad 100644
--- a/src/qs8-igemm/c2-neon-mull-dup.c.in
+++ b/src/qs8-igemm/c2-neon-mull-dup.c.in
@@ -8,6 +8,7 @@
 $assert 8 <= NR <= 16
 $assert REQUANTIZATION in ["FP32", "GEMMLOWP", "RNDNU"]
 $assert not CHANNELWISE or REQUANTIZATION == "FP32"
+$assert DUP in ["DUP", "LD1R", "LD2R", "LD4R"]
 #include <assert.h>
 
 #include <arm_neon.h>
@@ -23,7 +24,7 @@
 $if REQUANTIZATION == "FP32" and CHANNELWISE and not ARMV8:
   $PARAMS_STRUCT = "neon_fp32"
 $ISA = "neonv8" if ARMV8 else "neon"
-void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${"ld4r" if LD4R else "dup"}(
+void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${DUP.lower()}(
     size_t mr,
     size_t nc,
     size_t kc,
@@ -86,9 +87,23 @@
       $if MLA:
         while (k >= 16 * sizeof(int8_t)) {
           $for M in range(MR):
-            $if LD4R:
+            $if DUP == "LD4R":
               const int16x4x4_t va${M}x0 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
               const int16x4x4_t va${M}x1 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
+            $elif DUP == "LD2R":
+              const int16x4x2_t va${M}0x0 = vld2_dup_s16((const void*)a${M});
+              const int16x4x2_t va${M}1x0 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+              const int16x4x2_t va${M}0x1 = vld2_dup_s16((const void*)a${M});
+              const int16x4x2_t va${M}1x1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+            $elif DUP == "LD1R":
+              const int16x4_t va${M}0x0 = vld1_dup_s16((const void*)a${M});
+              const int16x4_t va${M}1x0 = vld1_dup_s16((const void*)(a${M} + 2));
+              const int16x4_t va${M}2x0 = vld1_dup_s16((const void*)(a${M} + 4));
+              const int16x4_t va${M}3x0 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
+              const int16x4_t va${M}0x1 = vld1_dup_s16((const void*)a${M});
+              const int16x4_t va${M}1x1 = vld1_dup_s16((const void*)(a${M} + 2));
+              const int16x4_t va${M}2x1 = vld1_dup_s16((const void*)(a${M} + 4));
+              const int16x4_t va${M}3x1 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
             $else:
               const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
               const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
@@ -99,12 +114,19 @@
 
           $for K in range(4):
             $for M in range(MR):
-              $if LD4R:
+              $if DUP == "LD4R":
                 const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}x0.val[${K}]);
                 const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}x1.val[${K}]);
+              $elif DUP == "LD2R":
+                const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${int(K/2)}x0.val[${K%2}]);
+                const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${int(K/2)}x1.val[${K%2}]);
+              $elif DUP == "LD1R":
+                const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${K}x0);
+                const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${K}x1);
               $else:
                 const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x0), ${K}));
                 const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x1), ${K}));
+
             $for N in range(0, NR, 4):
               $for M in range(MR):
                 int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}x0, va${M}c${K}x0);
@@ -119,10 +141,18 @@
 
       ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
         $for M in range(MR):
-         $if LD4R:
-           const int16x4x4_t va${M} = vld4_dup_s16((const void*)a${M}); a${M} += 8;
-         $else:
-           const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
+          $if DUP == "LD4R":
+            const int16x4x4_t va${M} = vld4_dup_s16((const void*)a${M}); a${M} += 8;
+          $elif DUP == "LD2R":
+            const int16x4x2_t va${M}0 = vld2_dup_s16((const void*)a${M});
+            const int16x4x2_t va${M}1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+          $elif DUP == "LD1R":
+            const int16x4_t va${M}0 = vld1_dup_s16((const void*)a${M});
+            const int16x4_t va${M}1 = vld1_dup_s16((const void*)(a${M} + 2));
+            const int16x4_t va${M}2 = vld1_dup_s16((const void*)(a${M} + 4));
+            const int16x4_t va${M}3 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
+          $else:
+            const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
 
         $for K in range(4):
           $for N in range(0, NR, 4):
@@ -130,8 +160,12 @@
 
         $for K in range(4):
           $for M in range(MR):
-            $if LD4R:
+            $if DUP == "LD4R":
               const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}.val[${K}]);
+            $elif DUP == "LD2R":
+              const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${int(K/2)}.val[${K%2}]);
+            $elif DUP == "LD1R":
+              const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${K});
             $else:
               const int8x8_t va${M}c${K} = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), ${K}));
 
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
index 4fe336c..731d298 100644
--- a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -81,6 +81,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -99,6 +100,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -117,6 +119,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -135,6 +138,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..d72f568
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,357 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..0b0c8e9
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,351 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 1x16 indirect-GEMM (IGEMM) microkernel, 2 channels of K per step (c2),
+// RNDNU requantization, NEON MULL+MLAL main loop.  This is the LD2R variant:
+// activation pairs are loaded with vld2_dup_s16 (2 instructions per 8 bytes)
+// instead of one vld1 plus four vdup_lane (see template c2-neon-mull-dup.c.in).
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is consumed 2 int8 values (one int16 pair) at a time; round up so the
+  // tail code only has to handle multiples of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Packed weights start with 16 int32 bias values -> 4 accumulators.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      // Indirection buffer: one activation-row pointer per ks step.  The
+      // sentinel 'zero' pointer (padding row) is deliberately not offset.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      // Main loop: 16 bytes of K per iteration (2x unrolled), MULL then MLAL.
+      while (k >= 16 * sizeof(int8_t)) {
+        // LD2R: each vld2_dup_s16 broadcasts two consecutive int16 pairs into
+        // .val[0]/.val[1]; together the four loads cover K pairs c0..c3 for
+        // both unroll halves (x0 and x1).
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        // For each K pair: widening multiply (x0), widening MLA (x1), then
+        // pairwise-add-accumulate the int16 products into the int32 lanes.
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      // Remainder of 8..15 bytes: one single-pass (MULL only) iteration.
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Tail of 2, 4 or 6 bytes (kc was rounded up to a multiple of 2);
+      // dup_lane is used here since fewer than 4 pairs remain.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // RNDNU requantization: saturating pre-shift, saturating doubling
+    // high-half multiply, then rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    // Saturating narrow int32 -> int16 (add zero point) -> int8; A64 has the
+    // *_high forms, A32 narrows each half and recombines.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      // Full 16-column store; rewind the indirection buffer for the next tile.
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      // Partial store of the final 1..15 columns via 8/4/2/1-byte pieces.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
index 23c1544..9398df3 100644
--- a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -81,6 +81,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -99,6 +100,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -117,6 +119,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -135,6 +138,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..bbdf888
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,251 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 1x16 indirect-GEMM (IGEMM) microkernel, 2 channels of K per step (c2),
+// RNDNU requantization, NEON MULL-only (no MLAL unrolling).  This is the
+// LD1R variant: each int16 activation pair is broadcast with vld1_dup_s16
+// (4 loads per 8 bytes) instead of one vld1 plus four vdup_lane.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is consumed 2 int8 values (one int16 pair) at a time; round up so the
+  // tail code only has to handle multiples of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Packed weights start with 16 int32 bias values -> 4 accumulators.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      // Indirection buffer: one activation-row pointer per ks step.  The
+      // sentinel 'zero' pointer (padding row) is deliberately not offset.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 bytes (4 K pairs) per iteration, single MULL pass.
+      while (k >= 8 * sizeof(int8_t)) {
+        // LD1R: broadcast each consecutive int16 pair across all 4 lanes.
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        // Widening int8 multiply, then pairwise-add-accumulate into int32.
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Tail of 2, 4 or 6 bytes (kc was rounded up to a multiple of 2);
+      // dup_lane is used here since fewer than 4 pairs remain.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // RNDNU requantization: saturating pre-shift, saturating doubling
+    // high-half multiply, then rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    // Saturating narrow int32 -> int16 (add zero point) -> int8; A64 has the
+    // *_high forms, A32 narrows each half and recombines.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      // Full 16-column store; rewind the indirection buffer for the next tile.
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      // Partial store of the final 1..15 columns via 8/4/2/1-byte pieces.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..dd69de7
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,249 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM microkernel: MR=1 row x NR=16 output channels, consuming 2 "k"
+// elements per multiply (c2 layout).  NEON MULL variant whose activation
+// broadcasts use LD2R (vld2_dup: 2 loads replace 1 LD1 + 4 DUP), with rndnu
+// requantization (pre-shift, saturating doubling multiply-high, rounding
+// post-shift).
+// NOTE(review): auto-generated from src/qs8-igemm/c2-neon-mull-dup.c.in —
+// fix issues in the template, not in this file.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // The kernel multiplies activations in pairs of int8 values, so kc must be
+  // treated as a multiple of 2 bytes.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Initialize the 16 per-channel accumulators from the biases packed at
+    // the head of the weights blob.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    // Walk the ks entries of the indirection buffer.
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      // Entries equal to the "zero" pointer are padding taps and must NOT be
+      // rebased by a_offset.
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 activation bytes (4 groups of 2 channels) per iteration.
+      while (k >= 8 * sizeof(int8_t)) {
+        // LD2R: each vld2_dup_s16 broadcasts two adjacent int16 pairs across
+        // 64-bit vectors -> va00 covers k-groups c0/c1, va01 covers c2/c3.
+        // (void*) cast: a0 is int8_t*, the loads are 2-byte-wide and may be
+        // unaligned.
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        // Weights: 4 panels (c0..c3) of 16 output channels each.
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // 8x8->16 widening multiplies, pairwise-accumulated into int32: each
+        // vpadalq_s16 sums the two int16 products of a channel pair.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: 2..6 bytes of k left (kc is a multiple of 2).  One 8-byte
+      // load, then per-pair broadcasts via DUP-lane.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // rndnu requantization: saturating left pre-shift, saturating doubling
+    // multiply-high, then rounding right post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    // Saturating narrow int32 -> int16 (adding the output zero point), then
+    // int16 -> int8; AArch64 uses the *_high forms to avoid vcombine.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    // Clamp to the requested output range.
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      // Full 16-column tile: store and rewind the indirection pointer for the
+      // next column block.
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      // Tail: emit the remaining nc columns in 8/4/2/1-byte steps, shifting
+      // the vector down after each partial store.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
index 2eeb261..ae0b4b7 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..efeba82
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,263 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM microkernel: MR=1 row x NR=8 output channels, 2 "k" elements per
+// multiply (c2).  NEON MLAL variant (double-pumped: two 8-byte activation
+// slices per main-loop iteration) with LD1R activation broadcasts (4 vld1_dup
+// loads replace 1 LD1 + 4 DUP), using fp32 ("magic bias") requantization.
+// NOTE(review): auto-generated from src/qs8-igemm/c2-neon-mull-dup.c.in —
+// fix issues in the template, not in this file.
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // Activations are consumed in pairs of int8 values (c2 layout).
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Load the 8 per-channel bias accumulators from the weights blob.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      // The "zero" pointer marks padding taps; only real rows are rebased by
+      // a_offset.
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      // MLAL main loop: two 8-byte activation slices (x0 and x1) per
+      // iteration; products of the second slice are folded in with vmlal_s8
+      // before the pairwise accumulate.
+      while (k >= 16 * sizeof(int8_t)) {
+        // LD1R: each vld1_dup_s16 broadcasts one int16 pair (2 int8 channels)
+        // across a 64-bit vector.  (void*) cast: a0 is int8_t*, the 2-byte
+        // loads may be unaligned.
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      // Single (non-pumped) pass for one remaining 8-byte slice.
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: 2..6 bytes of k left; broadcast pairs via DUP-lane.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // fp32 requantization: scale in float, clamp, then convert to integer by
+    // adding a "magic bias" and subtracting its integer representation.
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+    // Narrow int32 -> int16 -> int8; values are already clamped above, so
+    // plain (non-saturating) narrowing moves suffice.
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      // Full 8-column tile: store and rewind the indirection pointer for the
+      // next column block.
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      // Tail: emit the remaining nc columns in 4/2/1-byte steps, shifting the
+      // vector down after each partial store.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..15884fd
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(  // 1x8 QS8 indirect GEMM, 2 channels/group, NEON MLAL, LD2R a-loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // round K up to the c2 channel-group size
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // `zero` marks padding taps; real pointers get a_offset applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K-values per iteration, 2x unrolled for MLAL pairing
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: one load broadcasts channel pairs c0 and c1
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // second LD2R covers pairs c2 and c3
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // reinterpret broadcast int16 pair as 8 int8 lanes
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // MULL then MLAL accumulates two K-steps into one int16x8
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add-accumulate widens int16 -> int32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // remainder of 8 K-values: MULL only, no MLAL pairing
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // final 2/4/6 K-values: one 8-byte load, DUP per 2-channel group
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization starts here
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);  // magic-bias trick: float add performs round-to-nearest
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));  // take even int16 halves: values already clamped to int16 range
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column block
+
+      nc -= 8;
+    } else {  // tail store: 4/2/1 columns via lane stores and rotates
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
index 4ed1059..55c7a52 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
index f625f40..e80ae2b 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -72,6 +72,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -82,6 +83,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -92,6 +94,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -102,6 +105,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..0f51636
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,258 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(  // 1x8 QS8 indirect GEMM, 2 channels/group, NEONv8 MLAL, LD1R a-loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // round K up to the c2 channel-group size
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // `zero` marks padding taps; real pointers get a_offset applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K-values per iteration, 2x unrolled for MLAL pairing
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one 2-channel group per load (4 loads replace 1 LD1 + 4 DUP)
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // reinterpret broadcast int16 as 8 int8 lanes
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // MULL then MLAL accumulates two K-steps into one int16x8
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add-accumulate widens int16 -> int32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // remainder of 8 K-values: MULL only, no MLAL pairing
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // final 2/4/6 K-values: one 8-byte load, DUP per 2-channel group
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: scale, then round with ARMv8 FCVTNS
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // round-to-nearest-even conversion (NEONv8-only instruction)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow + zero-point add
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column block
+
+      nc -= 8;
+    } else {  // tail store: 4/2/1 columns via lane stores and rotates
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..eadfc8f
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,252 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(  // QS8 1x8 c2 IGEMM, FP32 requantization, NEONv8; MLAL main loop with LD2R activation loads
+    size_t mr,  // rows to compute; this kernel handles exactly 1
+    size_t nc,  // output columns remaining
+    size_t kc,  // K size in bytes per activation pointer
+    size_t ks,  // bytes of indirection entries per output pixel
+    const int8_t** restrict a,  // indirection buffer of activation row pointers
+    const void* restrict w,  // packed weights: 8 int32 bias values, then interleaved int8 weights
+    int8_t* restrict c,  // output matrix
+    size_t cm_stride,
+    size_t cn_stride,  // byte stride between consecutive 8-column output blocks
+    size_t a_offset,  // byte offset applied to activation pointers (except the zero pointer)
+    const int8_t* zero,  // sentinel pointer; entries equal to it are used as-is (presumably a zero/padding buffer)
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // channels are consumed in pairs (c2 layout)
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start at the packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;  // bytes of indirection entries left for this output pixel
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // only real activation rows get a_offset applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K-bytes per iteration, split into x0/x1 halves paired as MULL + MLAL
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: two s16 (channel pairs c0/c1), each broadcast across 4 lanes
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // channel pairs c2/c3
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);  // second 8 bytes, same layout
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // first-half weights for all 4 channel pairs
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // channel pair c0
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // second-half weight load interleaved between MULL and MLAL
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise widen-accumulate into int32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // channel pair c1
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // channel pair c2
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);  // channel pair c3
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // one 8-byte pass without MLAL pairing
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // channel pairs c0/c1
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // channel pairs c2/c3
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail of 2, 4, or 6 bytes (kc was rounded to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads a full 8 bytes; only the first k are consumed (buffer presumably padded)
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail path falls back to DUP-lane broadcasts
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // FP32 requantization: scale in float, then round back to int32
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // round-to-nearest-even convert (FCVTNS, requires ARMv8)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // SQXTN/SQXTN2 narrowing on AArch64
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);  // clamp bounds from requantization params
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column block
+
+      nc -= 8;
+    } else {  // partial store: emit 4/2/1 bytes, rotating the vector between stores
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index ee36e7e..a5bd984 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -72,6 +72,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -82,6 +83,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -92,6 +94,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -102,6 +105,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
index 209cae5..d9bf45e 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
index a6d1de7..672bcc4 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..ef00e5a
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,260 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r(  // QS8 1x8 c2 IGEMM, RNDNU requantization, baseline NEON; MLAL main loop with LD1R activation loads
+    size_t mr,  // rows to compute; this kernel handles exactly 1
+    size_t nc,  // output columns remaining
+    size_t kc,  // K size in bytes per activation pointer
+    size_t ks,  // bytes of indirection entries per output pixel
+    const int8_t** restrict a,  // indirection buffer of activation row pointers
+    const void* restrict w,  // packed weights: 8 int32 bias values, then interleaved int8 weights
+    int8_t* restrict c,  // output matrix
+    size_t cm_stride,
+    size_t cn_stride,  // byte stride between consecutive 8-column output blocks
+    size_t a_offset,  // byte offset applied to activation pointers (except the zero pointer)
+    const int8_t* zero,  // sentinel pointer; entries equal to it are used as-is (presumably a zero/padding buffer)
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // channels are consumed in pairs (c2 layout)
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start at the packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;  // bytes of indirection entries left for this output pixel
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // only real activation rows get a_offset applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K-bytes per iteration, split into x0/x1 halves paired as MULL + MLAL
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: one s16 (one int8 channel pair) broadcast per load
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));  // pair c1 (offsets are in bytes)
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);  // second 8 bytes, same layout
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // first-half weights for all 4 channel pairs
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // channel pair c0
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // second-half weight load interleaved between MULL and MLAL
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise widen-accumulate into int32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);  // channel pair c1
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);  // channel pair c2
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);  // channel pair c3
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // one 8-byte pass without MLAL pairing
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail of 2, 4, or 6 bytes (kc was rounded to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads a full 8 bytes; only the first k are consumed (buffer presumably padded)
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail path falls back to DUP-lane broadcasts
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // pre-shift (a right shift is encoded as a negative VSHL amount)
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling multiply-high by the fixed-point multiplier
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding post-shift (round-to-nearest-up)
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // SQXTN/SQXTN2 narrowing on AArch64
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);  // clamp bounds from requantization params
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column block
+
+      nc -= 8;
+    } else {  // partial store: emit 4/2/1 bytes, rotating the vector between stores
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..de66b6c
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,254 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM (indirect GEMM) microkernel: MR=1 row by NR=8 output columns,
+// 2 channels of K per step (c2).  NEON MULL+MLAL variant with LD2R activation
+// loads: each vld2_dup_s16 reads two consecutive int16 pairs (4 int8) and
+// broadcasts each pair across a 64-bit vector, replacing the 1xLD1 + 4xDUP
+// sequence of the "dup" variant.  Requantization is "rndnu": signed
+// pre-shift, saturating doubling-high multiply, rounding post-shift.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is consumed two int8 channels at a time; round up to a multiple of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Accumulators start from the per-channel biases stored at the head of w.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      // Indirection: fetch the next activation-row pointer.  The sentinel
+      // `zero` pointer is used as-is (a_offset not applied) for padding rows.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      // Main loop: 16 K-elements per iteration, as two 8-element halves
+      // combined with VMULL (first half) + VMLAL (second half) in int16,
+      // then widened into the int32 accumulators via VPADAL.
+      while (k >= 16 * sizeof(int8_t)) {
+        // NOTE(review): vld2_dup_s16 through a (const void*) cast reads
+        // 16-bit lanes from the int8 stream; presumably relies on NEON
+        // unaligned-load tolerance — generated code, confirm against the
+        // c2-neon-mull-dup.c.in template before hand-editing.
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      // 8-element remainder: a single VMULL pass (no MLAL pairing).
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // 2/4/6-element tail: one 8-byte load, then per-pair vdup_lane.
+      // NOTE(review): vld1_s8 reads a full 8 bytes even when k < 8;
+      // presumably the caller guarantees readable padding — TODO confirm.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // Requantize (rndnu): signed pre-shift, saturating doubling-high
+    // multiply, rounding post-shift; parameters broadcast from params.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    // Narrow to int16 with saturation, add the output zero point, then
+    // narrow to int8 and clamp to [output_min, output_max].
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    // Full 8-wide store; otherwise write the 4/2/1-byte tail and finish.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection pointer list for the next column block.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
index c42b556..cc5dc9b 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..f4e7c2c
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,194 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM (indirect GEMM) microkernel: MR=1 row by NR=8 output columns,
+// 2 channels of K per step (c2).  NEON MULL-only variant with LD1R
+// activation loads: four vld1_dup_s16, each broadcasting one 16-bit
+// (= 2x int8) channel pair, replacing the 1xLD1 + 4xDUP sequence of the
+// "dup" variant.  Requantization is "rndnu": signed pre-shift, saturating
+// doubling-high multiply, rounding post-shift.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is consumed two int8 channels at a time; round up to a multiple of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Accumulators start from the per-channel biases stored at the head of w.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      // Indirection: fetch the next activation-row pointer.  The sentinel
+      // `zero` pointer is used as-is (a_offset not applied) for padding rows.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 K-elements per iteration, one VMULL per 4-column group
+      // per channel pair, widened into the int32 accumulators via VPADAL.
+      // NOTE(review): vld1_dup_s16 through a (const void*) cast reads a
+      // 16-bit lane from the int8 stream; presumably relies on NEON
+      // unaligned-load tolerance — generated code, confirm against the
+      // c2-neon-mull-dup.c.in template before hand-editing.
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // 2/4/6-element tail: one 8-byte load, then per-pair vdup_lane.
+      // NOTE(review): vld1_s8 reads a full 8 bytes even when k < 8;
+      // presumably the caller guarantees readable padding — TODO confirm.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // Requantize (rndnu): signed pre-shift, saturating doubling-high
+    // multiply, rounding post-shift; parameters broadcast from params.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    // Narrow to int16 with saturation, add the output zero point, then
+    // narrow to int8 and clamp to [output_min, output_max].
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    // Full 8-wide store; otherwise write the 4/2/1-byte tail and finish.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection pointer list for the next column block.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..e1127b0
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM (indirect GEMM) microkernel: MR=1 row by NR=8 output columns,
+// 2 channels of K per step (c2).  NEON MULL-only variant with LD2R
+// activation loads: each vld2_dup_s16 reads two consecutive int16 pairs
+// (4 int8) and broadcasts each pair across a 64-bit vector, replacing the
+// 1xLD1 + 4xDUP sequence of the "dup" variant.  Requantization is "rndnu":
+// signed pre-shift, saturating doubling-high multiply, rounding post-shift.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is consumed two int8 channels at a time; round up to a multiple of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    // Accumulators start from the per-channel biases stored at the head of w.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      // Indirection: fetch the next activation-row pointer.  The sentinel
+      // `zero` pointer is used as-is (a_offset not applied) for padding rows.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 K-elements per iteration, one VMULL per 4-column group
+      // per channel pair, widened into the int32 accumulators via VPADAL.
+      // NOTE(review): vld2_dup_s16 through a (const void*) cast reads
+      // 16-bit lanes from the int8 stream; presumably relies on NEON
+      // unaligned-load tolerance — generated code, confirm against the
+      // c2-neon-mull-dup.c.in template before hand-editing.
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // 2/4/6-element tail: one 8-byte load, then per-pair vdup_lane.
+      // NOTE(review): vld1_s8 reads a full 8 bytes even when k < 8;
+      // presumably the caller guarantees readable padding — TODO confirm.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    // Requantize (rndnu): signed pre-shift, saturating doubling-high
+    // multiply, rounding post-shift; parameters broadcast from params.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    // Narrow to int16 with saturation, add the output zero point, then
+    // narrow to int8 and clamp to [output_min, output_max].
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    // Full 8-wide store; otherwise write the 4/2/1-byte tail and finish.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection pointer list for the next column block.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
index 778d81e..c2cc4ff 100644
--- a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -97,6 +97,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -129,6 +130,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -161,6 +163,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -193,6 +196,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..a171e2b
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,527 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..38d94f3
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,515 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r(  // 2x16 QS8 IGEMM, c2 packing, rndnu requantization, NEON MLAL with LD2R activation loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is processed in pairs of int8, so round up to a multiple of 2
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;  // mr == 1: alias row 1's output to row 0 so its stores are harmless duplicates
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the 16 packed bias values
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;  // bytes of indirection pointers left for this output tile
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // the 'zero' vector is used as-is; real rows get a_offset added
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K-bytes per row per iteration (MULL + MLAL pair)
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: dup-load two int16 pairs (each int16 = one pair of int8 channels)
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // channel pair c0 = val[0] of the first LD2R
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add the int16 products into the int32 accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // channel pair c1 = val[1] of the first LD2R
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // channel pair c2 = val[0] of the second LD2R (a + 4)
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);  // channel pair c3 = val[1] of the second LD2R
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // leftover 8-byte K block: single MULL pass, no MLAL
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // K remainder of 2, 4 or 6 bytes (kc was rounded to even); uses plain lane-dup
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // only the first k bytes are consumed below
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {  // bytes 2-3 present: channel pair c1
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {  // bytes 4-5 present: channel pair c2
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);  // consumed MR=2 indirection pointers
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, saturating doubling multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);  // add zero point while saturating-narrowing 32 -> 16 -> 8
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);  // clamp to [output_min, output_max]
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full tile: store all 16 columns for both rows
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next 16-column block
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));  // tail: pack both rows' low halves, then store 8/4/2/1-wide pieces
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
index b8efd96..6fb25bd 100644
--- a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -97,6 +97,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -129,6 +130,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -161,6 +163,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -193,6 +196,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..c20966e
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,357 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 indirect GEMM (IGEMM) microkernel producing a 2x16 output tile with
+// 2-element channel blocking (c2), NEON MULL arithmetic, and LD1R-style
+// activation loads: each vld1_dup_s16 broadcasts one pair of int8 inputs
+// (viewed as a single int16) to all four lanes, replacing the 1xLD1 + 4xDUP
+// sequence of the "dup" variant.  Requantization is "rndnu": signed pre-shift,
+// saturating doubling multiply, rounding post-shift.
+//
+//   mr        - number of valid output rows (1 or 2)
+//   nc        - number of output columns remaining
+//   kc        - reduction length in bytes (rounded up to a multiple of 2 below)
+//   ks        - total indirection-buffer span consumed per output tile
+//   a         - indirection buffer: array of row pointers, 2 per iteration
+//   w         - packed weights: 16 int32 initial accumulators (bias) followed
+//               by int8 weight panels
+//   c         - output pointer; rows are cm_stride apart, column groups
+//               advance by cn_stride
+//   a_offset  - byte offset added to every non-`zero` activation pointer
+//   zero      - sentinel pointer to a zero buffer; pointers equal to it are
+//               used as-is (padding rows), all others get a_offset applied
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // Channels are consumed in pairs (c2), so kc must be a multiple of 2 bytes.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // With a single valid row, alias c1 to c0 so row-1 stores harmlessly
+  // overwrite row 0 instead of branching in the hot path.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    // Initial accumulators (bias) are the first 16 int32 values of the packed
+    // weights; both rows start from the same per-channel values.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      // Fetch the next pair of activation row pointers from the indirection
+      // buffer; pointers other than the `zero` sentinel get a_offset applied.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 bytes of activations per row per iteration.  Four LD1R
+      // loads per row, each broadcasting one int16 (= one pair of int8) at
+      // byte offsets 0/2/4/6, replace the LD1+4xDUP of the "dup" variant.
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        // 16 weight panels: 4 channel pairs (c0..c3) x 4 column groups.
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // Channel pair c0: widening multiply (vmull_s8) then pairwise
+        // accumulate (vpadalq_s16) sums the two products of each pair into
+        // one int32 lane.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        // Channel pair c1.
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        // Channel pair c2.
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        // Channel pair c3.
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: 2, 4, or 6 leftover bytes (kc is a multiple of 2).  Load
+      // up to 8 bytes once, then broadcast each consumed int16 pair with
+      // vdup_lane_s16 instead of LD1R.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    // rndnu requantization: shift left by the (negative) pre-shift, saturating
+    // doubling multiply-high by the fixed-point multiplier, then rounding
+    // shift by the (negative) post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    // Narrow int32 -> int16 with saturation, add the output zero point, then
+    // narrow to int8.  AArch64 uses the *_high forms to fuse the combine.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    // Clamp both rows to the requested output range.
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      // Full 16-column store; rewind the indirection buffer for the next
+      // column tile of the same rows.
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      // Partial-tile store: interleave row 0 low / row 1 low into one vector,
+      // then peel off 8/4/2/1 columns via lane stores, shifting the vector
+      // down with vextq after each chunk.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..1d7b9a0
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,353 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
index e96bccf..ce8efc9 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..01825d8
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,372 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r(  // QS8 IGEMM, MR=2 NR=8 c2, fp32 requant; LD1R: 4x vld1_dup instead of 1 vld1 + 4 vdup_lane
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // k is consumed in 2-element (c2) groups
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // mr == 1: alias row 1 onto row 0 so stores are harmless
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators seeded from first 8 int32 of packed w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // "zero" marks a padding row: no a_offset applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: two 8-element k-blocks fused via mull+mlal
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast each 2-byte (c2) pair across the vector
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // LD1R already duplicated the pair; reinterpret is free (vs. dup_lane in the DUP variant)
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // second k-block accumulated into the same 16-bit product
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate widens c2 products to int32
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // one leftover 8-element k-block: mull only, no mlal pairing
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder of 2/4/6 k; one 8-byte load then dup_lane per c2 group (kc rounded up, so over-read stays within the padded row)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);  // two indirection pointers consumed per iteration
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: convert, scale, clamp, magic-bias round back to int
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);  // adding magic bias places the rounded integer in the low mantissa bits
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));  // A64: uzp1 extracts the low halves (values already clamped above)
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {  // full-width store: low half -> row 0, high half -> row 1
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next nc tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // tail: store 4/2/1 bytes, shifting the vector after each partial store
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..20529b1
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,360 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM microkernel: MR=2 rows x NR=8 columns, KR=2 (c2), using NEON
+// MULL/MLAL with LD2R loads. vld2_dup_s16 broadcasts two consecutive 16-bit
+// units (i.e. two adjacent pairs of int8 inputs) across a vector, replacing
+// the 1x LD1 + 4x DUP sequence of the baseline "dup" variant.
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // KR=2: round the reduction dimension up to a whole number of int8 pairs.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // When only one row is computed (mr == 1), alias c1 to c0 so row-1 stores
+  // are harmlessly redundant instead of out of bounds.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    // Accumulators start from the per-output-channel bias stored at the head
+    // of the packed weights; both rows share the same bias.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    // Indirection loop: ks bytes of input pointers, consumed 2 at a time.
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      // The sentinel `zero` pointer marks padding rows and must not be offset.
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      // Main loop, 2x unrolled (16 input bytes per row per iteration) so the
+      // widening multiplies can be paired as MULL + MLAL before the vpadal
+      // accumulation. Each vld2_dup_s16 broadcasts one pair of 2-element
+      // int8 groups: va${M}0 covers groups c0/c1 (bytes 0-3), va${M}1 covers
+      // groups c2/c3 (bytes 4-7).
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      // Single (non-unrolled) 8-byte step: MULL only, no MLAL pairing.
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder (2, 4, or 6 bytes after the kc round-up): fall back to the
+      // dup-lane broadcast since there are fewer than 4 full groups to load.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    // FP32 requantization: scale in float, clamp, then convert back to int
+    // via the magic-bias addition (adds a large constant so the integer bits
+    // of the float hold the rounded result, then subtracts it back out).
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+    // Narrow 32-bit accumulators to int8; AArch64 has UZP1 for a cheaper
+    // even-lane extraction, other targets use movn-based narrowing.
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      // Full 8-column store for both rows (row 1 in the high half, row 0 low).
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection buffer for the next column tile.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      // Tail store: emit 4/2/1 bytes per row, shifting consumed lanes out
+      // with vext between steps. Lane stores are unaligned-safe.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
index 5b5674f..47dbc29 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
index 4bdf2c5..6cec06f 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -86,6 +86,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -104,6 +105,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -122,6 +124,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -140,6 +143,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..8fc47b3
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,361 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..90f8e0c
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,349 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // pad channel count to whole 2-channel (c2) groups
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // with mr == 1, alias row 1 onto row 0 so stores are harmless
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from bias packed ahead of the weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // a pointer equal to "zero" marks a padding row: a_offset is not applied to it
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 channels per iteration, MULL + MLAL over two 8-channel halves
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: each s16 lane is a 2-channel pair; val[0]/val[1] broadcast pairs c0/c1
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // second-half weights loaded between MULL and MLAL to hide load latency
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // remainder: at most one 8-channel block, MULL only (no MLAL pairing)
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail: 2, 4, or 6 leftover channels (kc is rounded up to even)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses DUP instead of LD2R since a partial load was already done
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: int32 -> float, scale, round back to int32
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // round-to-nearest-even conversion (ARMv8 FCVTNS)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full tile: store 8 output channels per row (rows stored high-to-low per IGEMM convention)
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial tile: store 4, then 2, then 1 columns as needed
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index c2d90bb..9693095 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -86,6 +86,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -104,6 +105,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -122,6 +124,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -140,6 +143,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
index 59bc18a..10ec5e4 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
index 832d222..b3d4265 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..b3d5ecb
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,363 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r(  // 2x8 QS8 IGEMM microkernel: rndnu requantization, NEON MULL/MLAL, LD1R (vld1_dup) activation loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in pairs of int8
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;  // mr == 1: alias row 1 onto row 0 so its stores are redundant but harmless
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the bias stored at the head of the packed weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // the sentinel `zero` pointer (padding row) is used as-is, without a_offset
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 bytes of K per iteration, two 8-byte halves fused via MULL then MLAL
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one int8 pair (read as one int16) to all 4 lanes; offsets +2/+4/+6 bytes select the next pairs
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // weights for the first 8-byte half, 4 K-pairs x 8 N, loaded up front
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // K-pair 0: reinterpret the broadcast int16 lanes back to int8 for the widening multiply
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // second-half weights are loaded lazily, between MULL and MLAL
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise widen-accumulate int16 products into the int32 accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);  // K-pair 1
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);  // K-pair 2
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);  // K-pair 3
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // remainder: one full 8-byte K block, MULL only (no MLAL pairing)
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // sub-8-byte tail: k is 2, 4, or 6 after round_up_po2; uses DUP-style lane broadcast instead of LD1R
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {  // second K-pair present (k >= 4)
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {  // third K-pair present (k == 6)
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);  // consume one indirection-buffer entry per row
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, fixed-point multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow int32->int16, then add output zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full-tile store: row 1 in the high half, row 0 in the low half
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next N tile
+
+      nc -= 8;
+    } else {  // partial-tile store: emit 4/2/1 bytes per row, shifting the vector down after each store
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..e08d7a8
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,351 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 1261460..29c87f8 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..987e197
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..aada03a
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,253 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r(
+    size_t mr,                 // number of output rows to compute (1 or 2)
+    size_t nc,                 // number of output columns remaining
+    size_t kc,                 // K (input channels) per row, in bytes of int8_t
+    size_t ks,                 // bytes of indirection pointers consumed per output pixel
+    const int8_t** restrict a, // indirection buffer: per-row input pointers
+    const void* restrict w,    // packed weights: bias then interleaved B blocks
+    int8_t* restrict c,        // output matrix
+    size_t cm_stride,          // byte stride between output rows
+    size_t cn_stride,          // byte stride between 8-column output blocks
+    size_t a_offset,           // byte offset applied to non-zero input pointers
+    const int8_t* zero,        // sentinel pointer marking padding rows
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);  // ks must cover MR=2 pointers per pixel
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 kernel: K is consumed 2 channels at a time
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;  // mr == 1: alias row 1 onto row 0 so its stores are harmless duplicates
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // load bias for columns 0-3
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // load bias for columns 4-7
+    int32x4_t vacc1x0123 = vacc0x0123;  // row 1 starts from the same bias
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // real row: apply batch offset; zero rows read the zero buffer as-is
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;  // advance past this pixel's two row pointers
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 input channels = 4 channel-pairs per iteration
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);          // val[0] = dup of pair c0, val[1] = dup of pair c1
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // val[0] = pair c2, val[1] = pair c3
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // B block: columns 0-3, channel pair 0
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);  // broadcast channel pair 0 as bytes
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // widening s8*s8 -> s16 products
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate into s32 lanes
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);  // channel pair 1
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);  // channel pair 2
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);  // channel pair 3
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder: 2, 4, or 6 channels (kc rounded up to 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads 8 bytes even when k < 8; presumably relies on padded input — standard for these kernels
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses DUP instead of LD2R
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {  // at least 4 remainder channels
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {  // 6 remainder channels
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);  // consumed two indirection pointers (one per row)
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // stage 1: pre-shift
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // stage 2: saturating doubling multiply-high
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // stage 3: rounding post-shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // A64: fused narrow-high form
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);  // A32: combine narrowed halves
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store; IGEMM stores rows high-to-low
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next 8-column block
+
+      nc -= 8;
+    } else {  // partial store: emit 4, then 2, then 1 bytes, shifting lanes down between steps
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
index 7627e9c..6da9949 100644
--- a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -113,6 +113,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -159,6 +160,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -205,6 +207,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -251,6 +254,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..e73705d
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,701 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..5c61701
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,683 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
index ed90930..46c8889 100644
--- a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -113,6 +113,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]);
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -159,6 +160,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]);
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -205,6 +207,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]);
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -251,6 +254,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]);
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..e6758fe
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,467 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{  // QS8 IGEMM microkernel: 3 rows (MR=3) x 16 cols (NR=16), 2 channels per step (c2), NEON MULL, LD1R input-broadcast variant; `a` is an indirection array of row pointers.
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // channels are consumed in pairs (c2), so kc is rounded up to a multiple of 2
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;  // with fewer rows, alias the out-of-range row pointer so its stores are harmless duplicates
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // load 16 int32 biases for this column block from packed weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // rows 1 and 2 start from the same bias as row 0
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // `zero` marks padding rows; only real rows get the a_offset adjustment
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 input channels (4 c2 pairs) per row per iteration
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one 16-bit channel pair to all 4 lanes (replaces 1 LD1 + 4 DUP)
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // 16 weight vectors: 4 column groups x 4 channel pairs (c0..c3)
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);  // reinterpret broadcast pairs as int8x8 so vmull_s8 multiplies per int8 element
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add adjacent 16-bit products (the 2 channels of a pair) into 32-bit accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);  // second channel pair (c1)
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);  // third channel pair (c2)
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);  // fourth channel pair (c3)
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder: k is 2, 4, or 6 bytes here (kc was rounded to a multiple of 2); pairs handled via DUP-by-lane instead of LD1R
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast channel pair 0 from the already-loaded vector
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {  // at least a second channel pair remains
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {  // third channel pair (k == 6)
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);  // advance to the next group of 3 indirection pointers
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, saturating doubling multiply-high, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // AArch64: vqmovn_high fuses narrow+combine; ARMv7 path below uses vcombine
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);  // clamp to the requested output range
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column tile: store, advance c pointers, rewind indirection for the next tile
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection pointer consumed by the inner ks loop
+
+      nc -= 16;
+    } else {  // partial tile: write 8/4/2/1 columns, shifting vector contents between steps
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;  // __builtin_assume_aligned(ptr, 1) tells the compiler the store may be unaligned
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..3464de6
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,461 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM microkernel: 3 rows (MR=3) x 16 output channels (NR=16), with 2 K
+// elements per multiply (c2), rndnu requantization, NEON MULL, LD2R variant:
+// the per-group activation broadcasts are produced with vld2_dup_s16 loads
+// instead of one vld1 load followed by vdup_lane duplications.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  // If mr < 3, the out-of-range output row pointers alias the previous row.
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    // Initialize the accumulators with the packed biases from w.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    // Iterate the indirection buffer; each step consumes 3 row pointers.
+    size_t p = ks;
+    do {
+      // Row pointers equal to `zero` reference the padding row and do not
+      // receive the a_offset adjustment.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      // Main loop: consume 8 bytes of K per row per iteration (4 groups of 2
+      // int8 values). Two vld2_dup_s16 (LD2R) loads per row materialize the
+      // four 16-bit broadcast vectors c0/c1 (va*0) and c2/c3 (va*1).
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // Group c0: widening multiply then pairwise-accumulate into int32.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        // Group c1.
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        // Group c2.
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        // Group c3.
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: 2, 4 or 6 leftover bytes of K (kc is rounded up to a
+      // multiple of 2 above); broadcasts use vdup_lane here.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    // Requantization (rndnu): saturating pre-shift, saturating doubling
+    // multiply-high, rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    // Add the output zero point and narrow int32 -> int16 -> int8 with
+    // saturation; AArch64 uses the *_high forms to save vcombine operations.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    // Store: either a full 16-column tile, or an 8/4/2/1-wide masked tail.
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
index 3eb683b..15224bf 100644
--- a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -99,6 +99,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -125,6 +126,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -151,6 +153,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -177,6 +180,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..b9d6b78
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,472 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..d2c2745
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,454 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 97f08e7..1fdfab1 100644
--- a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -99,6 +99,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]);
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -125,6 +126,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]);
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -151,6 +153,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]);
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -177,6 +180,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]);
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..8f840a0
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..3fd3d05
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,320 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 indirect GEMM (IGEMM) microkernel: 3-row x 8-column output tile,
+// consuming K in pairs of int8 ("c2"), with rndnu requantization.
+// LD2R variant: the A operand is loaded with vld2_dup_s16, which reads two
+// consecutive 16-bit (i.e. 2-byte K-pair) values and broadcasts each across a
+// 4-lane vector — replacing the 1x LD1 + 4x DUP sequence of the "dup" variant.
+// NOTE(review): generated from src/qs8-igemm/c2-neon-mull-dup.c.in (see diff
+// header above); regenerate via tools/xngen instead of editing by hand.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is processed two int8 elements at a time, so round it up to a pair.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  // Output row pointers; rows beyond mr alias the previous row so partial
+  // tiles write each valid row's data without branching in the hot loop.
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  // Outer loop over output columns (N), 8 per iteration.
+  do {
+    // w starts with 8 packed int32 biases; row 0's accumulators seed rows 1-2.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    // Walk the indirection buffer: ks bytes of A row pointers, 3 per step.
+    size_t p = ks;
+    do {
+      // a[i] == zero marks a padding row; a_offset is applied only to real rows.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 K-bytes (4 K-pairs c0..c3) per iteration.
+      while (k >= 8 * sizeof(int8_t)) {
+        // Each vld2_dup_s16 reads 4 bytes (two 16-bit K-pairs) and broadcasts:
+        // vaM0.val[0]/[1] hold K-pairs c0/c1, vaM1.val[0]/[1] hold c2/c3.
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        // Weights are packed as 8 groups of 8 int8: {b0123, b4567} per K-pair.
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // K-pair c0: int8 widening multiply, then pairwise add-accumulate
+        // (vpadalq_s16) sums the two K elements of the pair into int32 lanes.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        // K-pair c1
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        // K-pair c2
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        // K-pair c3
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Residual K (2, 4, or 6 bytes after the round_up_po2 above):
+      // handle one K-pair at a time with scalar dup-lane broadcasts.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    // rndnu requantization: saturating pre-shift, saturating doubling
+    // multiply-high, then rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    // Narrow int32 -> int16 (saturating) and add the output zero point;
+    // ARM64 uses vqmovn_high to save a vcombine.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    // Clamp to the requested output range.
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      // Full 8-column tile: store rows in reverse order (c2, c1, c0) and
+      // rewind the indirection pointer for the next column block.
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      // Tail (< 8 columns): emit 4/2/1 columns via unaligned lane stores,
+      // shifting the vectors down after each partial store.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
index 502b732..fe1e9f0 100644
--- a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -129,6 +129,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -189,6 +190,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -249,6 +251,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -309,6 +312,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..da5095d
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,871 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30x0 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x0 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x0 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x0 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+        const int16x4_t va30x1 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x1 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x1 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x1 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vprod3x89ABc0 = vmlal_s8(vprod3x89ABc0, vb89ABc0x1, va3c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vprod3xCDEFc0 = vmlal_s8(vprod3xCDEFc0, vbCDEFc0x1, va3c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va31x0);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va31x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vprod3x89ABc1 = vmlal_s8(vprod3x89ABc1, vb89ABc1x1, va3c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vprod3xCDEFc1 = vmlal_s8(vprod3xCDEFc1, vbCDEFc1x1, va3c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va32x0);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va32x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vprod3x89ABc2 = vmlal_s8(vprod3x89ABc2, vb89ABc2x1, va3c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vprod3xCDEFc2 = vmlal_s8(vprod3xCDEFc2, vbCDEFc2x1, va3c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va33x0);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va33x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vprod3x89ABc3 = vmlal_s8(vprod3x89ABc3, vb89ABc3x1, va3c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vprod3xCDEFc3 = vmlal_s8(vprod3xCDEFc3, vbCDEFc3x1, va3c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..2326264
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,847 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30x0 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x0 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+        const int16x4x2_t va30x1 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x1 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0.val[0]);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vprod3x89ABc0 = vmlal_s8(vprod3x89ABc0, vb89ABc0x1, va3c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vprod3xCDEFc0 = vmlal_s8(vprod3xCDEFc0, vbCDEFc0x1, va3c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va30x0.val[1]);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va30x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vprod3x89ABc1 = vmlal_s8(vprod3x89ABc1, vb89ABc1x1, va3c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vprod3xCDEFc1 = vmlal_s8(vprod3xCDEFc1, vbCDEFc1x1, va3c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va31x0.val[0]);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va31x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vprod3x89ABc2 = vmlal_s8(vprod3x89ABc2, vb89ABc2x1, va3c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vprod3xCDEFc2 = vmlal_s8(vprod3xCDEFc2, vbCDEFc2x1, va3c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va31x0.val[1]);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va31x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vprod3x89ABc3 = vmlal_s8(vprod3x89ABc3, vb89ABc3x1, va3c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vprod3xCDEFc3 = vmlal_s8(vprod3xCDEFc3, vbCDEFc3x1, va3c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
index e282fba..f6c3e24 100644
--- a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -129,6 +129,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(va3x0.val[0]);
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(va3x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -189,6 +190,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(va3x0.val[1]);
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(va3x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -249,6 +251,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(va3x0.val[2]);
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(va3x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -309,6 +312,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(va3x0.val[3]);
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(va3x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..46a978f
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,573 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..a8a707c
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,565 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
index 2e3feae..5aef476 100644
--- a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -113,6 +113,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -147,6 +148,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -181,6 +183,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -215,6 +218,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..f92e8d8
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,575 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30x0 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x0 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x0 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x0 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+        const int16x4_t va30x1 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x1 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x1 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x1 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va31x0);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va31x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va32x0);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va32x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va33x0);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va33x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..e37d199
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,551 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30x0 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x0 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+        const int16x4x2_t va30x1 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x1 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0.val[0]);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va30x0.val[1]);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va30x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va31x0.val[0]);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va31x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va31x0.val[1]);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va31x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
index a957034..c247266 100644
--- a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -113,6 +113,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(va3x0.val[0]);
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(va3x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -147,6 +148,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(va3x0.val[1]);
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(va3x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -181,6 +183,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(va3x0.val[2]);
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(va3x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -215,6 +218,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(va3x0.val[3]);
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(va3x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..5ac9976
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,389 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 indirect GEMM (IGEMM) microkernel: MR=4 rows x NR=8 columns, K packed in
+// groups of 2 int8 (c2), plain NEON VMULL (no MLAL 2x-unroll), RNDNU
+// requantization.  "ld1r" variant: each 2-byte K-group of A is loaded and
+// broadcast with a single vld1_dup_s16 (4 LD1R per row) instead of one vld1
+// followed by four vdup_lane_s16.
+// NOTE(review): auto-generated from src/qs8-igemm/c2-neon-mull-dup.c.in;
+// change the template, not this file.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is consumed in int8 pairs (c2): round kc up to a multiple of 2 bytes.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  // Output row pointers; rows beyond mr alias the previous row so their
+  // (duplicate) stores land in valid memory.
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    // Initialize accumulators from the per-column bias stored at the head of
+    // the packed weights; rows 1-3 start from row 0's copy.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      // Fetch the next 4 indirection pointers; rows pointing at the shared
+      // "zero" buffer must not be offset by a_offset.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 bytes (4 c2 groups) of A per row per iteration.
+      while (k >= 8 * sizeof(int8_t)) {
+        // LD1R loads: broadcast each 16-bit (c2) group of A across a 16x4
+        // vector.  The +2/+4/+6 byte offsets select groups c1..c3.
+        // NOTE(review): a0+2 etc. may be 2-byte-misaligned addresses; LD1R
+        // tolerates this on the targeted cores -- confirm against template.
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        // 8 vectors of packed weights: 4 c2 groups x column halves {0123,4567}.
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // Group c0: widen-multiply int8 pairs, pairwise-accumulate into int32.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // K remainder (2, 4, or 6 bytes): one 8-byte A load per row, then
+      // dup-lane broadcasts for as many c2 groups as remain.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    // RNDNU requantization: pre-shift, saturating doubling multiply-high,
+    // then rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    // Narrow int32 -> int16 -> int8 with saturation, adding the output zero
+    // point at int16 width; AArch64 uses the *_high forms to save a vcombine.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    // Store: full 8-column tile, or 4/2/1-byte tail via lane stores with
+    // vextq rotations (IGEMM convention writes rows 3..0, high half first).
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection buffer for the next column tile.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..7ee9d66
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,381 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}